/* Inlining decision heuristics.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth and
     so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.
   The inliner itself is split into two passes:

   pass_early_inline

     Simple local inlining pass inlining callees into the current function.
     This pass makes no use of whole unit analysis and thus it can make only
     very simple decisions based on local properties.

     The strength of the pass is that it is run in topological order
     (reverse postorder) on the callgraph.  Functions are converted into SSA
     form just before this pass and optimized subsequently.  As a result, the
     callees of the function seen by the early inliner have already been
     optimized, and the results of early inlining add a lot of optimization
     opportunities for the local optimization.

     The pass handles the obvious inlining decisions within the compilation
     unit - inlining auto inline functions, inlining for size and
     flattening.

     The main strength of the pass is its ability to eliminate abstraction
     penalty in C++ code (via a combination of inlining and early
     optimization) and thus improve the quality of analysis done by the real
     IPA optimizers.

     Because of the lack of whole unit knowledge, the pass cannot really make
     good code size/performance tradeoffs.  It however does very simple
     speculative inlining allowing code size to grow by
     EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
     the optimizations performed later are very likely to eliminate the cost.
   pass_ipa_inline

     This is the real inliner able to handle inlining with whole program
     knowledge.  It performs the following steps:

     1) inlining of small functions.  This is implemented by a greedy
     algorithm ordering all inlinable cgraph edges by their badness and
     inlining them in this order as long as the inline limits allow doing so.

     This heuristic is not very good at inlining recursive calls.  Recursive
     calls can be inlined with results similar to loop unrolling.  To do so,
     a special purpose recursive inliner is executed on the function when a
     recursive edge is met as a viable candidate.

     2) Unreachable functions are removed from the callgraph.  Inlining leads
     to devirtualization and other modifications of the callgraph, so
     functions may become unreachable during the process.  Also functions
     declared as extern inline or virtual functions are removed, since after
     inlining we no longer need the offline bodies.

     3) Functions called once and not exported from the unit are inlined.
     This should almost always lead to a reduction of code size by
     eliminating the need for an offline copy of the function.  */
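
/* As a rough sketch (simplified; the real loop in inline_small_functions
   below also handles lazy badness updates, recursive edges and
   speculation), step 1) of pass_ipa_inline behaves like:

     put every inlinable edge into a min-heap keyed by edge_badness;
     while the heap is not empty:
       pop the edge with the smallest (most negative) badness;
       if it still satisfies can_inline_edge_p,
	  want_inline_small_function_p and the unit growth limit,
	 inline it and re-key the affected caller/callee edges;  */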
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "double-int.h"
#include "wide-int.h"
#include "fold-const.h"
#include "trans-mem.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "coverage.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"
typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
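
/* Note: fibonacci_heap pops the smallest key first, so edges with the
   most negative badness (see edge_badness below) are the most desirable
   inline candidates and are extracted first.  */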
/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
static gcov_type spec_rem;

/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100.  */
static sreal cgraph_freq_base_rec, percent_rec;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the caller.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);

  /* Look for the function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits based on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = inline_summaries->get (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;
      else
	break;
    }

  what_info = inline_summaries->get (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
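
/* Illustrative numbers (example values, not necessarily the built-in
   defaults): if the largest function on the inline chain has self size
   5000 and --param large-function-growth is 100, LIMIT becomes
   5000 + 5000 * 100 / 100 = 10000 insns, and an inlining that pushes
   the outer function past both this limit and
   --param large-function-insns is rejected above.  */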
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	     xstrdup_for_dump (e->caller->name ()), e->caller->order,
	     xstrdup_for_dump (e->callee->name ()), e->callee->order,
	     cgraph_inline_failed_string (e->inline_failed));
}
/* Decide whether sanitizer-related attributes allow inlining.  */

static bool
sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
{
  /* Don't care if sanitizer is disabled.  */
  if (!(flag_sanitize & SANITIZE_ADDRESS))
    return true;

  if (!caller || !callee)
    return true;

  return !!lookup_attribute ("no_sanitize_address",
			     DECL_ATTRIBUTES (caller)) ==
	 !!lookup_attribute ("no_sanitize_address",
			     DECL_ATTRIBUTES (callee));
}
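
/* I.e. with -fsanitize=address an inlining where exactly one side is
   marked no_sanitize_address is rejected; when both or neither carry
   the attribute the sanitization state agrees and inlining is safe.  */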
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */
static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
		   bool disregard_limits = false)
{
  bool inlinable = true;
  enum availability avail;
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
  struct function *caller_fun = e->caller->get_fun ();
  struct function *callee_fun = callee ? callee->get_fun () : NULL;

  gcc_assert (e->inline_failed);

  if (!callee || !callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (callee->calls_comdat_local)
    {
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
      inlinable = false;
    }
  else if (!inline_summaries->get (callee)->inlinable
	   || (caller_fun && fn_contains_cilk_spawn_p (caller_fun)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_INTERPOSABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
	e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	   && DECL_FUNCTION_PERSONALITY (callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	       != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl)
	   && !is_tm_pure (e->caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_fun && callee_fun->can_throw_non_call_exceptions
	   && !(caller_fun && caller_fun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->decl,
						callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (e->caller->decl, callee->decl))
    {
      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	   && !disregard_limits
	   && !lookup_attribute ("flatten",
				 DECL_ATTRIBUTES
				   (e->caller->global.inlined_to
				    ? e->caller->global.inlined_to->decl
				    : e->caller->decl))
	   && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      if (((opt_for_fn (e->caller->decl, optimize)
	    > opt_for_fn (callee->decl, optimize))
	   || (opt_for_fn (e->caller->decl, optimize_size)
	       != opt_for_fn (callee->decl, optimize_size)))
	  /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_file)
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}
/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}
/* Return true if we are interested in inlining the small function E early.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that before profile summary, all
     hot paths' IR look exactly the same as the profiled binary.  As a
     result, in the early inliner, we will disregard the size limit and
     inline those callsites that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile
	   && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
	;
      else if (!e->maybe_hot_p ()
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     xstrdup_for_dump (e->caller->name ()),
		     e->caller->order,
		     xstrdup_for_dump (callee->name ()), callee->order,
		     growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     xstrdup_for_dump (e->caller->name ()),
		     e->caller->order,
		     xstrdup_for_dump (callee->name ()), callee->order,
		     growth);
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns "
		     "divided by number of calls\n",
		     xstrdup_for_dump (e->caller->name ()),
		     e->caller->order,
		     xstrdup_for_dump (callee->name ()), callee->order,
		     growth);
	  want_inline = false;
	}
    }
  return want_inline;
}
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline sreal
compute_uninlined_call_time (struct inline_summary *callee_info,
			     struct cgraph_edge *edge)
{
  sreal uninlined_call_time = (sreal)callee_info->time;
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);

  if (edge->count && caller->count)
    uninlined_call_time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  int caller_time = inline_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
}
/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline sreal
compute_inlined_call_time (struct cgraph_edge *edge,
			   int edge_time)
{
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);
  int caller_time = inline_summaries->get (caller)->time;
  sreal time = edge_time;

  if (edge->count && caller->count)
    time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    time *= cgraph_freq_base_rec * edge->frequency;
  else
    time = time >> 11;

  /* This calculation should match the one in ipa-inline-analysis.
     FIXME: Once ipa-inline-analysis is converted to sreal this can be
     simplified.  */
  time -= (sreal) ((gcov_type) edge->frequency
		   * inline_edge_summary (edge)->call_stmt_time
		   * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)) / INLINE_TIME_SCALE;
  time += caller_time;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
  return time;
}
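
/* Rough worked example of the scaling above: an edge whose frequency
   equals CGRAPH_FREQ_BASE contributes the full EDGE_TIME, one executed
   half as often contributes half of it, and edges with zero recorded
   frequency are damped by the 1/2**11 factor instead.  */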
/* Return true if the speedup for inlining E is bigger than
   PARAM_MAX_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  sreal time = compute_uninlined_call_time (inline_summaries->get (e->callee),
					    e);
  sreal inlined_time = compute_inlined_call_time (e, estimate_edge_time (e));

  if (time - inlined_time
      > (sreal) time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
	 * percent_rec)
    return true;
  return false;
}
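
/* Example with assumed values: uninlined time 1000, inlined time 850
   and --param inline-min-speedup=10 give a saving of 150, which
   exceeds 1000 * 10 / 100 = 100, so the inlining counts as a big
   speedup.  */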
/* Return true if we are interested in inlining the small function E.
   When REPORT is true, report reason to dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  /* Do fast and conservative check if the function can be a good
     inline candidate.  At the moment we allow inline hints to
     promote non-inline functions to inline and we increase
     MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
	    && (!e->count || !e->maybe_hot_p ()))
	   && inline_summaries->get (callee)->min_size
	      - inline_edge_summary (e)->call_stmt_size
	      > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
    {
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      want_inline = false;
    }
  else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
	   && inline_summaries->get (callee)->min_size
	      - inline_edge_summary (e)->call_stmt_size
	      > 16 * MAX_INLINE_INSNS_SINGLE)
    {
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
			  ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
			  : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
	;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
	 hints suggest that inlining the given function is very
	 profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE
	       && ((!big_speedup
		    && !(hints & (INLINE_HINT_indirect_call
				  | INLINE_HINT_known_hot
				  | INLINE_HINT_loop_iterations
				  | INLINE_HINT_array_index
				  | INLINE_HINT_loop_stride)))
		   || growth >= MAX_INLINE_INSNS_SINGLE * 16))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !opt_for_fn (e->caller->decl, flag_inline_functions))
	{
	  /* growth_likely_positive is expensive, always test it last.  */
	  if (growth >= MAX_INLINE_INSNS_SINGLE
	      || growth_likely_positive (callee, growth))
	    {
	      e->inline_failed = CIF_NOT_DECLARED_INLINED;
	      want_inline = false;
	    }
	}
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
	 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
	 inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !big_speedup
	       && !(hints & INLINE_HINT_known_hot)
	       && growth >= ((hints & (INLINE_HINT_indirect_call
				       | INLINE_HINT_loop_iterations
				       | INLINE_HINT_array_index
				       | INLINE_HINT_loop_stride))
			     ? MAX (MAX_INLINE_INSNS_AUTO,
				    MAX_INLINE_INSNS_SINGLE)
			     : MAX_INLINE_INSNS_AUTO))
	{
	  /* growth_likely_positive is expensive, always test it last.  */
	  if (growth >= MAX_INLINE_INSNS_SINGLE
	      || growth_likely_positive (callee, growth))
	    {
	      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	      want_inline = false;
	    }
	}
      /* If call is cold, do not inline when function body would grow.  */
      else if (!e->maybe_hot_p ()
	       && (growth >= MAX_INLINE_INSNS_SINGLE
		   || growth_likely_positive (callee, growth)))
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!edge->maybe_hot_p ())
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!caller_freq)
    {
      reason = "function is inlined and unlikely";
      want_inline = false;
    }

  if (!want_inline)
    ;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
      if (max_count
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	      >= max_prob))
	{
	  reason = "profile of recursive call is too large";
	  want_inline = false;
	}
      if (!max_count
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	      >= max_prob))
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "profile of recursive call is too small";
	  want_inline = false;
	}
      else if (!max_count
	       && (edge->frequency * 100 / caller_freq
		   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}
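
/* Example of the peeling bound above: with max_depth 8 the first copy
   accepts a recursion probability of at most
   CGRAPH_FREQ_BASE - CGRAPH_FREQ_BASE/8, i.e. 87.5%, and each further
   depth squares that bound, so deep copies must be ever less likely
   to recurse.  */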
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
	return true;
      if (!can_inline_edge_p (e, true))
	return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
	*(bool *)has_hot_call = true;
    }
  return false;
}
/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  bool has_hot_call = false;

  if (node->ultimate_alias_target () != node)
    return false;
  /* Already inlined?  */
  if (node->global.inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_thunks_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_thunks_and_aliases (check_callers, &has_hot_call,
						true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}
/* A cost model driving the inlining heuristics in a way so that edges with
   the smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static sreal
edge_badness (struct cgraph_edge *edge, bool dump)
{
  sreal badness;
  int growth, edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  struct inline_summary *callee_info = inline_summaries->get (callee);
  inline_hints hints;
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
	       xstrdup_for_dump (edge->caller->name ()),
	       edge->caller->order,
	       xstrdup_for_dump (callee->name ()),
	       edge->callee->order);
      fprintf (dump_file, "      size growth %i, time %i ",
	       growth,
	       edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
	fprintf (dump_file, "      %f: Growth %d <= 0\n", badness.to_double (),
		 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
	fprintf (dump_file, "      max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available.  Compute badness as:

                 time_saved * caller_count
     goodness = ---------------------------------
	        growth_of_caller * overall_growth

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
    {
      sreal numerator, denominator;

      numerator = (compute_uninlined_call_time (callee_info, edge)
		   - compute_inlined_call_time (edge, edge_time));
      if (numerator == 0)
	numerator = ((sreal) 1 >> 8);
      if (caller->count)
	numerator *= caller->count;
      else if (opt_for_fn (caller->decl, flag_branch_probabilities))
	numerator = numerator >> 11;
      denominator = growth;
      if (callee_info->growth > 0)
	denominator *= callee_info->growth;

      badness = - numerator / denominator;

      if (dump)
	{
	  fprintf (dump_file,
		   "      %f: guessed profile. frequency %f, count %"PRId64
		   " caller count %"PRId64
		   " time w/o inlining %f, time w inlining %f"
		   " overall growth %i (current) %i (original)\n",
		   badness.to_double (), (double)edge->frequency / CGRAPH_FREQ_BASE,
		   edge->count, caller->count,
		   compute_uninlined_call_time (callee_info, edge).to_double (),
		   compute_inlined_call_time (edge, edge_time).to_double (),
		   estimate_growth (callee),
		   callee_info->growth);
	}
    }
  /* When function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness = badness >> nest;
      else
	badness = badness << nest;
      if (dump)
	fprintf (dump_file, "      %f: no profile. nest %i\n",
		 badness.to_double (), nest);
    }
  gcc_checking_assert (badness != 0);

  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
		| INLINE_HINT_loop_iterations
		| INLINE_HINT_array_index
		| INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, "      Adjusted by hints %f\n", badness.to_double ());
  return badness;
}
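
/* A rough example of the profile-based formula above (made-up numbers):
   saving 200 time units in a caller executed 1000 times while growing
   it by 40 insns with overall growth 2 gives
   goodness = 200 * 1000 / (40 * 2) = 2500, recorded as badness -2500;
   the heap therefore extracts this edge before any edge with a less
   negative key.  */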
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static void
update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
{
  sreal badness = edge_badness (edge, false);
  if (edge->aux)
    {
      edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
      gcc_checking_assert (n->get_data () == edge);

      /* fibonacci_heap::replace_key does busy updating of the
	 heap that is unnecessarily expensive.
	 We do lazy increases: after extracting minimum if the key
	 turns out to be out of date, it is re-inserted into heap
	 with the correct value.  */
      if (badness < n->get_key ())
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s/%i -> %s/%i, %f"
		       " to %f\n",
		       xstrdup_for_dump (edge->caller->name ()),
		       edge->caller->order,
		       xstrdup_for_dump (edge->callee->name ()),
		       edge->callee->order,
		       n->get_key ().to_double (),
		       badness.to_double ());
	    }
	  heap->decrease_key (n, badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s/%i -> %s/%i, badness %f\n",
		   xstrdup_for_dump (edge->caller->name ()),
		   edge->caller->order,
		   xstrdup_for_dump (edge->callee->name ()),
		   edge->callee->order,
		   badness.to_double ());
	}
      edge->aux = heap->insert (badness, edge);
    }
}
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because a better context may now be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);

  FOR_EACH_ALIAS (where, ref)
    reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  reset_edge_growth_cache (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES track nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summaries->get (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  FOR_EACH_ALIAS (node, ref)
    {
      struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		heap->delete_node ((edge_heap_node_t *) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	/* We do not reset callee growth cache here.  Since we added a new
	   call, growth should have just increased and consequently the
	   badness metric doesn't need updating.  */
	if (e->inline_failed
	    && (callee = e->callee->ultimate_alias_target (&avail))
	    && inline_summaries->get (callee)->inlinable
	    && avail >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, callee->uid))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false))
	      update_edge_key (heap, e);
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		heap->delete_node ((edge_heap_node_t *) e->aux);
		e->aux = NULL;
	      }
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			edge_heap_t *heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (e->callee->ultimate_alias_target (&avail) == node
	    && avail > AVAIL_INTERPOSABLE))
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  */
	heap->insert (!max_count ? -e->frequency
		      : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
		      e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge *> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  edge_heap_t heap (sreal::min ());
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that function is small enough to be considered for inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  lookup_recursive_calls (node, node, &heap);
  if (heap.empty ())
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     node->name ());

  /* Do the inlining and update list of recursive calls during process.  */
  while (!heap.empty ())
    {
      struct cgraph_edge *curr = heap.extract_min ();
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started modifying
	 the function.  Be sure to redirect the edge to the original body
	 before estimating growths; otherwise we will be seeing growths
	 after inlining the already modified body.  */
      if (master_clone)
	{
	  curr->redirect_callee (master_clone);
	  reset_edge_growth_cache (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  curr->redirect_callee (dest);
	  reset_edge_growth_cache (curr);
	  break;
	}

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl
	    == curr->callee->ultimate_alias_target ()->decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  curr->redirect_callee (dest);
	  reset_edge_growth_cache (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double)curr->count / node->count);
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need the original clone to copy around.  */
	  master_clone = node->create_clone (node->decl, node->count,
					     CGRAPH_FREQ_BASE, false, vNULL,
					     true, NULL, NULL);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
	  curr->redirect_callee (master_clone);
	  reset_edge_growth_cache (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, &heap);
      n++;
    }

  if (!heap.empty () && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summaries->get (master_clone)->size,
	     inline_summaries->get (node)->size,
	     inline_summaries->get (master_clone)->time,
	     inline_summaries->get (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone so we don't need recursive calls to
     symtab_remove_unreachable_nodes.  */
  for (node = symtab->first_function (); node != master_clone;
       node = next)
    {
      next = symtab->next_function (node);
      if (node->global.inlined_to == master_clone)
	node->remove ();
    }
  master_clone->remove ();
  return true;
}
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((int64_t) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
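
/* For example (parameter values illustrative, not necessarily the
   defaults): with --param inline-unit-growth=20 a unit estimated at
   50000 insns may grow to 50000 * (100 + 20) / 100 = 60000 insns;
   units smaller than --param large-unit-insns are rounded up to that
   size first, so tiny units get proportionally more headroom.  */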
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = heap->insert (edge_badness (edge, false), edge);
    }
}
/* Remove EDGE from the fibheap.  */

static void
heap_edge_removal_hook (struct cgraph_edge *e, void *data)
{
  if (e->aux)
    {
      ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
      e->aux = NULL;
    }
}
/* Return true if speculation of edge E seems useful.
   If ANTICIPATE_INLINING is true, be conservative and hope that E
   may get inlined.  */

bool
speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
{
  enum availability avail;
  struct cgraph_node *target = e->callee->ultimate_alias_target (&avail);
  struct cgraph_edge *direct, *indirect;
  struct ipa_ref *ref;

  gcc_assert (e->speculative && !e->indirect_unknown_callee);

  if (!e->maybe_hot_p ())
    return false;

  /* See if IP optimizations found something potentially useful about the
     function.  For now we look only for CONST/PURE flags.  Almost everything
     else we propagate is useless.  */
  if (avail >= AVAIL_AVAILABLE)
    {
      int ecf_flags = flags_from_decl_or_type (target->decl);
      if (ecf_flags & ECF_CONST)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
	    return true;
	}
      else if (ecf_flags & ECF_PURE)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
	    return true;
	}
    }
  /* If we did not manage to inline the function nor redirect
     to an ipa-cp clone (these are seen by having the local flag set),
     it is probably pointless to inline it unless hardware is missing
     an indirect call predictor.  */
  if (!anticipate_inlining && e->inline_failed && !target->local.local)
    return false;
  /* For overwritable targets there is not much to do.  */
  if (e->inline_failed && !can_inline_edge_p (e, false, true))
    return false;
  /* OK, speculation seems interesting.  */
  return true;
}
/* We know that EDGE is not going to be inlined.
   See if we can remove speculation.  */

static void
resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
{
  if (edge->speculative && !speculation_useful_p (edge, false))
    {
      struct cgraph_node *node = edge->caller;
      struct cgraph_node *where = node->global.inlined_to
				  ? node->global.inlined_to : node;
      bitmap updated_nodes = BITMAP_ALLOC (NULL);

      spec_rem += edge->count;
      edge->resolve_speculation ();
      reset_edge_caches (where);
      inline_update_overall_summary (where);
      update_caller_keys (edge_heap, where,
			  updated_nodes, NULL);
      update_callee_keys (edge_heap, where,
			  updated_nodes);
      BITMAP_FREE (updated_nodes);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */

static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  edge_heap_t edge_heap (sreal::min ());
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  auto_vec<cgraph_edge *> new_indirect_edges;
  int initial_size = 0;
  struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
  struct cgraph_edge_hook_list *edge_removal_hook_holder;
  new_indirect_edges.create (8);

  edge_removal_hook_holder
    = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = 0;
  ipa_reduced_postorder (order, true, true, NULL);
  free (order);

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (node->has_gimple_body_p ()
	    || node->thunk.thunk_p)
	  {
	    struct inline_summary *info = inline_summaries->get (node);
	    struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;

	    /* Do not account external functions, they will be optimized out
	       if not inlined.  Also only count the non-cold portion of
	       the program.  */
	    if (!DECL_EXTERNAL (node->decl)
		&& !opt_for_fn (node->decl, optimize_size)
		&& node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED)
	      initial_size += info->size;
	    info->growth = estimate_growth (node);
	    if (dfs && dfs->next_cycle)
	      {
		struct cgraph_node *n2;
		int id = dfs->scc_no + 1;
		for (n2 = node; n2;
		     n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
		  {
		    struct inline_summary *info2 = inline_summaries->get (n2);
		    if (info2->scc_no)
		      break;
		    info2->scc_no = id;
		  }
	      }
	  }

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }
  ipa_free_postorder_info ();
  initialize_growth_caches ();

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      bool update = false;
      struct cgraph_edge *next;

      if (dump_file)
	fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
		 node->name (), node->order);

      for (edge = node->callees; edge; edge = next)
	{
	  next = edge->next_callee;
	  if (edge->inline_failed
	      && !edge->aux
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true)
	      && edge->inline_failed)
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (&edge_heap, edge);
	    }
	  if (edge->speculative && !speculation_useful_p (edge, edge->aux != NULL))
	    {
	      edge->resolve_speculation ();
	      update = true;
	    }
	}
      if (update)
	{
	  struct cgraph_node *where = node->global.inlined_to
				      ? node->global.inlined_to : node;
	  inline_update_overall_summary (where);
	  reset_edge_caches (where);
	  update_caller_keys (&edge_heap, where,
			      updated_nodes, NULL);
	  update_callee_keys (&edge_heap, where,
			      updated_nodes);
	  bitmap_clear (updated_nodes);
	}
    }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!edge_heap.empty ())
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      sreal badness = edge_heap.min_key ();
      sreal current_badness;
      int growth;

      edge = edge_heap.extract_min ();
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed || !edge->callee->analyzed)
	continue;

#ifdef ENABLE_CHECKING
      /* Be sure that caches are maintained consistent.  */
      sreal cached_badness = edge_badness (edge, false);

      int old_size_est = estimate_edge_size (edge);
      int old_time_est = estimate_edge_time (edge);
      int old_hints_est = estimate_edge_hints (edge);

      reset_edge_growth_cache (edge);
      gcc_assert (old_size_est == estimate_edge_size (edge));
      gcc_assert (old_time_est == estimate_edge_time (edge));
      /* FIXME:

         gcc_assert (old_hints_est == estimate_edge_hints (edge));

	 fails with profile feedback because some hints depend on the
	 maybe_hot_edge_p predicate, and because the callee gets inlined to
	 other calls, the edge may become cold.
	 This ought to be fixed by computing relative probabilities
	 for the given invocation but that will be better done once the whole
	 code is converted to sreals.  Disable for now and revert to "wrong"
	 value so enable/disable checking paths agree.  */
      edge_growth_cache[edge->uid].hints = old_hints_est + 1;

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out of date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      /* Disable checking for profile because roundoff errors may cause slight
	 deviations in the order.  */
      gcc_assert (max_count || cached_badness == current_badness);
      gcc_assert (current_badness >= badness);
#else
      current_badness = edge_badness (edge, false);
#endif
      if (current_badness != badness)
	{
	  if (edge_heap.min () && badness > edge_heap.min_key ())
	    {
	      edge->aux = edge_heap.insert (current_badness, edge);
	      continue;
	    }
	  else
	    badness = current_badness;
	}

      if (!can_inline_edge_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}

      callee = edge->callee->ultimate_alias_target ();
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s/%i with %i size\n",
		   callee->name (), callee->order,
		   inline_summaries->get (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s/%i in %s:%i\n"
		   " Estimated badness is %f, frequency %.2f.\n",
		   edge->caller->name (), edge->caller->order,
		   edge->call_stmt
		   ? gimple_filename ((const_gimple) edge->call_stmt)
		   : "unknown",
		   edge->call_stmt
		   ? gimple_lineno ((const_gimple) edge->call_stmt)
		   : -1,
		   badness.to_double (),
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file," Called %"PRId64"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}

      /* Heuristics for inlining small functions work poorly for
	 recursive calls where we do effects similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision to
	 the specific inliner.  */
      if (edge->recursive_p ())
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   opt_for_fn (edge->caller->decl,
					       flag_indirect_inlining)
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      resolve_noninline_speculation (&edge_heap, edge);
	      continue;
	    }
	  reset_edge_caches (where);
	  /* Recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
	    add_new_edges_to_heap (&edge_heap, new_indirect_edges);
	  update_callee_keys (&edge_heap, where, updated_nodes);
	  bitmap_clear (updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     leads to an effect similar to loop peeling and we might
	     completely optimize out the recursive call.  However we must be
	     extra selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      resolve_noninline_speculation (&edge_heap, edge);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  add_new_edges_to_heap (&edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee->function_symbol ());

	  update_callee_keys (&edge_heap, where, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as number of inlinable calls and size of the function body.
	 After inlining these properties might change for the function we
	 inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (&edge_heap, where, updated_nodes, NULL);
      /* Offline copy count has possibly changed, recompute if profile is
	 available.  */
      if (max_count)
	{
	  struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
	  if (n != edge->callee && n->analyzed)
	    update_callee_keys (&edge_heap, n, updated_nodes);
	}
      bitmap_clear (updated_nodes);

      if (dump_file)
	fprintf (dump_file,
		 " Inlined into %s which now has time %i and size %i,"
		 "net change of %+i.\n",
		 edge->caller->name (),
		 inline_summaries->get (edge->caller)->time,
		 inline_summaries->get (edge->caller)->size,
		 overall_size - old_size);

      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }

  free_growth_caches ();
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100: 0);
  BITMAP_FREE (updated_nodes);
  symtab->remove_edge_removal_hook (edge_removal_hook_holder);
}
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();

      /* We've hit a cycle?  It is time to give up.  */
      if (callee->aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     xstrdup_for_dump (callee->name ()),
		     xstrdup_for_dump (e->caller->name ()));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early);
	  continue;
	}

      /* Flatten attribute needs to be processed during late inlining.  For
	 extra code quality we however do flattening during early
	 optimization, too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup_for_dump (callee->name ()),
		 xstrdup_for_dump (e->caller->name ()));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
  if (!node->global.inlined_to)
    inline_update_overall_summary (node);
}
/* Count number of callers of NODE and store it into DATA (that
   points to an int).  Worker for cgraph_for_node_and_aliases.  */

static bool
sum_callers (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  int *num_calls = (int *)data;

  for (e = node->callers; e; e = e->next_caller)
    (*num_calls)++;
  return false;
}
/* Inline NODE to all callers.  Worker for cgraph_for_node_and_aliases.
   DATA points to the number of calls originally found so we avoid infinite
   recursion.  */

static bool
inline_to_all_callers (struct cgraph_node *node, void *data)
{
  int *num_calls = (int *)data;
  bool callee_removed = false;

  while (node->callers && !node->global.inlined_to)
    {
      struct cgraph_node *caller = node->callers->caller;

      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nInlining %s size %i.\n",
		   node->name (),
		   inline_summaries->get (node)->size);
	  fprintf (dump_file,
		   " Called once from %s %i insns.\n",
		   node->callers->caller->name (),
		   inline_summaries->get (node->callers->caller)->size);
	}

      inline_call (node->callers, true, NULL, NULL, true, &callee_removed);
      if (dump_file)
	fprintf (dump_file,
		 " Inlined into %s which now has %i size\n",
		 caller->name (),
		 inline_summaries->get (caller)->size);
      if (!(*num_calls)--)
	{
	  if (dump_file)
	    fprintf (dump_file, "New calls found; giving up.\n");
	  return callee_removed;
	}
      if (callee_removed)
	return true;
    }
  return false;
}
/* Output overall time estimate.  */
static void
dump_overall_stats (void)
{
  int64_t sum_weighted = 0, sum = 0;
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to
	&& !node->alias)
      {
	int time = inline_summaries->get (node)->time;
	sum += time;
	sum_weighted += time * node->count;
      }
  fprintf (dump_file, "Overall time estimate: "
	   "%"PRId64" weighted by profile: "
	   "%"PRId64"\n", sum, sum_weighted);
}
/* Output some useful stats about inlining.  */

static void
dump_inline_stats (void)
{
  int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
  int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
  int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
  int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
  int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
  int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
  int64_t reason[CIF_N_REASONS][3];
  int i;
  struct cgraph_node *node;

  memset (reason, 0, sizeof (reason));
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
	{
	  if (e->inline_failed)
	    {
	      reason[(int) e->inline_failed][0] += e->count;
	      reason[(int) e->inline_failed][1] += e->frequency;
	      reason[(int) e->inline_failed][2]++;
	      if (DECL_VIRTUAL_P (e->callee->decl))
		{
		  if (e->indirect_inlining_edge)
		    noninlined_virt_indir_cnt += e->count;
		  else
		    noninlined_virt_cnt += e->count;
		}
	      else
		{
		  if (e->indirect_inlining_edge)
		    noninlined_indir_cnt += e->count;
		  else
		    noninlined_cnt += e->count;
		}
	    }
	  else if (e->speculative)
	    {
	      if (DECL_VIRTUAL_P (e->callee->decl))
		inlined_speculative_ply += e->count;
	      else
		inlined_speculative += e->count;
	    }
	  else if (DECL_VIRTUAL_P (e->callee->decl))
	    {
	      if (e->indirect_inlining_edge)
		inlined_virt_indir_cnt += e->count;
	      else
		inlined_virt_cnt += e->count;
	    }
	  else
	    {
	      if (e->indirect_inlining_edge)
		inlined_indir_cnt += e->count;
	      else
		inlined_cnt += e->count;
	    }
	}
      for (e = node->indirect_calls; e; e = e->next_callee)
	if (e->indirect_info->polymorphic)
	  indirect_poly_cnt += e->count;
	else
	  indirect_cnt += e->count;
    }
  if (max_count)
    {
      fprintf (dump_file,
	       "Inlined %" PRId64 " + speculative "
	       "%" PRId64 " + speculative polymorphic "
	       "%" PRId64 " + previously indirect "
	       "%" PRId64 " + virtual "
	       "%" PRId64 " + virtual and previously indirect "
	       "%" PRId64 "\n" "Not inlined "
	       "%" PRId64 " + previously indirect "
	       "%" PRId64 " + virtual "
	       "%" PRId64 " + virtual and previously indirect "
	       "%" PRId64 " + still indirect "
	       "%" PRId64 " + still indirect polymorphic "
	       "%" PRId64 "\n", inlined_cnt,
	       inlined_speculative, inlined_speculative_ply,
	       inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
	       noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
	       noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
      fprintf (dump_file,
	       "Removed speculations %" PRId64 "\n",
	       spec_rem);
    }
  dump_overall_stats ();
  fprintf (dump_file, "\nWhy inlining failed?\n");
  for (i = 0; i < CIF_N_REASONS; i++)
    if (reason[i][2])
      fprintf (dump_file, "%-50s: %8i calls, %8i freq, %" PRId64 " count\n",
	       cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
	       (int) reason[i][2], (int) reason[i][1], reason[i][0]);
}
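
/* Illustration (not a complete command line): these statistics go to
   the inliner dump file, which can be requested with something like:

     gcc -O2 -fdump-ipa-inline file.c
*/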
/* Decide on the inlining.  We do so in topological order so as to
   avoid the expense of updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order;
  int i;
  int cold;
  bool remove_functions = false;

  if (!optimize)
    return 0;

  cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
  percent_rec = (sreal) 1 / (sreal) 100;

  order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);

  if (in_lto_p && optimize)
    ipa_update_after_lto_read ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  FOR_EACH_FUNCTION (node)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we would stop inlining at the
	 entry of cycles, possibly cloning that entry point and trying
	 to flatten itself, turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", node->name ());
	  flatten_function (node, false);
	}
    }
  if (dump_file)
    dump_overall_stats ();

  inline_small_functions ();

  gcc_assert (symtab->state == IPA_SSA);
  symtab->state = IPA_SSA_AFTER_INLINING;
  /* Do the first after-inlining removal.  We want to remove all "stale"
     extern inline functions and virtual functions so we really know what
     is called once.  */
  symtab->remove_unreachable_nodes (dump_file);
  free (order);

  /* Inline functions with a property that after inlining into all callers
     the code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth
     limits are met.  */
  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on functions to be inlined into all callers and "
	     "removing useless speculations:\n");

  /* Inlining one function called once has a good chance of preventing
     the inlining of another function into the same callee.  Ideally we
     should work in priority order, but inlining hot functions first is
     probably a good cut without the extra pain of maintaining the queue.

     ??? This does not really fit the bill perfectly: inlining a function
     into a callee often leads to better optimization of the callee due
     to increased context for optimization.
     For example if main() calls a function that outputs help
     and then a function that does the main optimization, we should
     inline the second with priority even if both calls are cold by
     themselves.

     We probably want to implement a new predicate replacing our use of
     maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
     to be hot.  */
  for (cold = 0; cold <= 1; cold++)
    {
      FOR_EACH_DEFINED_FUNCTION (node)
	{
	  struct cgraph_edge *edge, *next;
	  bool update = false;

	  for (edge = node->callees; edge; edge = next)
	    {
	      next = edge->next_callee;
	      if (edge->speculative && !speculation_useful_p (edge, false))
		{
		  edge->resolve_speculation ();
		  spec_rem += edge->count;
		  update = true;
		  remove_functions = true;
		}
	    }
	  if (update)
	    {
	      struct cgraph_node *where = node->global.inlined_to
					  ? node->global.inlined_to : node;
	      reset_edge_caches (where);
	      inline_update_overall_summary (where);
	    }
	  if (want_inline_function_to_all_callers_p (node, cold))
	    {
	      int num_calls = 0;
	      node->call_for_symbol_thunks_and_aliases (sum_callers,
							&num_calls, true);
	      while (node->call_for_symbol_thunks_and_aliases
		       (inline_to_all_callers, &num_calls, true))
		;
	      remove_functions = true;
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (optimize)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    {
      fprintf (dump_file,
	       "\nInlined %i calls, eliminated %i functions\n\n",
	       ncalls_inlined, nfunctions_inlined);
      dump_inline_stats ();
    }

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use the inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return remove_functions ? TODO_remove_functions : 0;
}
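
/* User-level illustration (not part of GCC itself): the decisions above
   include inlining a local function into all of its callers when that
   eliminates the out-of-line copy, e.g.:

     static int helper (int x) { return x * x + 1; }

     int entry (int x) { return helper (x); }

   After inlining helper into entry, no offline body of helper needs to
   be emitted, so overall code size shrinks.  */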
/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     e->callee->name ());
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	{
	  /* Set inlined to true if the callee is marked "always_inline" but
	     is not inlinable.  This will allow flagging an error later in
	     expand_call_inline in tree-inline.c.  */
	  if (lookup_attribute ("always_inline",
				DECL_ATTRIBUTES (callee->decl)) != NULL)
	    inlined = true;
	  continue;
	}

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 xstrdup_for_dump (e->callee->name ()),
		 xstrdup_for_dump (e->caller->name ()));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    inline_update_overall_summary (node);

  return inlined;
}
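
/* User-level illustration (not part of GCC itself): the callees handled
   above are those declared with the always_inline attribute, which are
   inlined even when optimization is off, e.g.:

     static inline int twice (int x)
       __attribute__ ((always_inline));
     static inline int twice (int x) { return 2 * x; }

     int f (int x) { return twice (x) + 1; }

   A recursive call to such a function is refused with
   CIF_RECURSIVE_INLINING instead.  */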
/* Decide on early inlining of small functions called by NODE.  Return
   true if anything was inlined.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();
      if (!inline_summaries->get (callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
	  && !opt_for_fn (node->decl, flag_inline_small_functions)
	  && !opt_for_fn (node->decl, flag_inline_functions))
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 callee->name ());

      if (!can_early_inline_edge_p (e))
	continue;

      if (e->recursive_p ())
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup_for_dump (callee->name ()),
		 xstrdup_for_dump (e->caller->name ()));
      inline_call (e, true, NULL, NULL, true);
      inlined = true;
    }

  return inlined;
}
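
/* Illustration (not part of GCC itself): because of the flag test
   above, a callee without the `inline' keyword can still be an
   early-inlining candidate when -finline-small-functions (enabled by
   default at -O2) or -finline-functions is in effect:

     static int sq (int x) { return x * x; }   (no `inline' keyword)
     int g (int x) { return sq (x); }          (sq may still be inlined)
*/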
unsigned int
early_inliner (function *fun)
{
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if data structures for the ipa inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls lowering passes and early
     optimization on it.  This may confuse us when the early inliner
     decides to inline a call to a function clone, because function
     clones don't have parameter lists in ipa-prop matching their
     signatures.  */
  if (ipa_node_params_sum)
    return 0;

#ifdef ENABLE_CHECKING
  node->verify ();
#endif
  node->remove_all_references ();

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This is undesirable, as functions
	 calling always-inline functions will get less optimized, but at
	 the same time inlining of functions calling always-inline
	 functions into an always-inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of
	 inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", node->name ());
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the
	     new iteration of early inlining works as expected.  We
	     however have values approximately right and thus we only
	     need to update edge info that might be cleared out for
	     newly discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      /* We have no summary for new bound store calls yet.  */
	      if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
		{
		  struct inline_edge_summary *es = inline_edge_summary (edge);
		  es->call_stmt_size
		    = estimate_num_insns (edge->call_stmt, &eni_size_weights);
		  es->call_stmt_time
		    = estimate_num_insns (edge->call_stmt, &eni_time_weights);
		}
	      if (edge->callee->decl
		  && !gimple_check_call_matching_types (
		      edge->call_stmt, edge->callee->decl, false))
		edge->call_stmt_cannot_inline_p = true;
	    }
	  if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
	    inline_update_overall_summary (node);
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  fun->always_inline_functions_inlined = true;

  return todo;
}
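
/* Illustration (not a complete command line): the iteration count above
   is bounded by the early-inliner-max-iterations param, which can be
   tuned with something like:

     gcc -O2 --param early-inliner-max-iterations=2 file.c
*/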
/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */

namespace {

const pass_data pass_data_early_inline =
{
  GIMPLE_PASS, /* type */
  "einline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_EARLY_INLINING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_early_inline : public gimple_opt_pass
{
public:
  pass_early_inline (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_inline, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_early_inline

unsigned int
pass_early_inline::execute (function *fun)
{
  return early_inliner (fun);
}

} // anon namespace

gimple_opt_pass *
make_pass_early_inline (gcc::context *ctxt)
{
  return new pass_early_inline (ctxt);
}
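
/* The pass object is instantiated by the pass manager; as an
   illustration, the early inliner is referenced in passes.def as:

     NEXT_PASS (pass_early_inline);

   (see passes.def for its exact position in the pipeline).  */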
namespace {

const pass_data pass_data_ipa_inline =
{
  IPA_PASS, /* type */
  "inline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_IPA_INLINING, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_inline : public ipa_opt_pass_d
{
public:
  pass_ipa_inline (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
		      inline_generate_summary, /* generate_summary */
		      inline_write_summary, /* write_summary */
		      inline_read_summary, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      inline_transform, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return ipa_inline (); }

}; // class pass_ipa_inline

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_inline (gcc::context *ctxt)
{
  return new pass_ipa_inline (ctxt);
}