/* Inlining decision heuristics.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

      can_inline_edge_p allows checking that a particular inlining is
      allowed by the limits specified by the user (allowed function growth,
      growth and so on).

      Functions are inlined when it is obvious the result is profitable (such
      as functions called once or when inlining reduces code size).
      In addition to that we perform inlining of small functions and
      recursive inlining.

   inlining heuristics

      The inliner itself is split into two passes:

      pass_early_inline

	Simple local inlining pass inlining callees into the current
	function.  This pass makes no use of whole unit analysis and thus it
	can do only very simple decisions based on local properties.

	The strength of the pass is that it is run in topological order
	(reverse postorder) on the callgraph.  Functions are converted into
	SSA form just before this pass and optimized subsequently.  As a
	result, the callees of the function seen by the early inliner were
	already optimized, and the results of early inlining add a lot of
	optimization opportunities for the local optimization.

	The pass handles the obvious inlining decisions within the
	compilation unit - inlining auto inline functions, inlining for size
	and flattening.

	The main strength of the pass is the ability to eliminate abstraction
	penalty in C++ code (via a combination of inlining and early
	optimization) and thus improve quality of analysis done by the real
	IPA optimizers.

	Because of the lack of whole unit knowledge, the pass cannot really
	make good code size/performance tradeoffs.  It however does very
	simple speculative inlining allowing code size to grow by
	EARLY_INLINING_INSNS when the callee is a leaf function.  In this
	case the optimizations performed later are very likely to eliminate
	the cost.

      pass_ipa_inline

	This is the real inliner able to handle inlining with whole program
	knowledge.  It performs the following steps:

	1) inlining of small functions.  This is implemented by a greedy
	algorithm ordering all inlinable cgraph edges by their badness and
	inlining them in this order as long as the inline limits allow
	doing so.

	This heuristic is not very good at inlining recursive calls.
	Recursive calls can be inlined with results similar to loop
	unrolling.  To do so, a special purpose recursive inliner is executed
	on a function when a recursive edge is met as a viable candidate.

	2) Unreachable functions are removed from the callgraph.  Inlining
	leads to devirtualization and other modification of the callgraph,
	so functions may become unreachable during the process.  Also the
	bodies of extern inline functions and of virtual functions are
	removed, since after inlining we no longer need the offline copies.

	3) Functions called once and not exported from the unit are inlined.
	This should almost always lead to a reduction of code size by
	eliminating the need for an offline copy of the function.  */
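/* As a rough sketch (not the exact control flow of the code below), step 1
   of pass_ipa_inline behaves like the following pseudocode:

     put every inlinable edge E into a heap keyed by edge_badness (E);
     while (heap not empty and unit size below the compute_max_insns cap)
       {
	 E = extract edge with smallest badness;
	 if (can_inline_edge_p (E) && want_inline_small_function_p (E))
	   {
	     inline_call (E);
	     recompute badness of edges into/out of the affected nodes;
	   }
       }

   The real implementation in inline_small_functions additionally handles
   lazy badness updates, recursive edges and speculative calls.  */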
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "fibheap.h"
#include "target.h"
#include "tree-pass.h"
#include "coverage.h"
#include "gimple-ssa.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "cilk.h"
/* Statistics we collect about inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
static sreal max_count_real, max_relbenefit_real, half_int_min_real;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
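/* For illustration (the parameter values here are the usual defaults, not
   guaranteed): with --param large-function-growth=100, a caller whose
   largest enclosing body is 500 insns may grow up to
   500 + 500 * 100 / 100 = 1000 insns before
   CIF_LARGE_FUNCTION_GROWTH_LIMIT triggers, and with
   --param stack-frame-growth=1000 the combined stack frame may grow by an
   order of magnitude before the stack limit check below fires.  */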
static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Look for function e->caller is inlined to.  While doing
     so work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits based on that.  Not on the self size of the
     outer function, not on the self size of inline code
     we immediately inline to.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  while (true)
    {
      info = inline_summary (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
	to = to->callers->caller;
      else
	break;
    }

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o datastructures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	     xstrdup (e->caller->name ()), e->caller->order,
	     xstrdup (e->callee->name ()), e->callee->order,
	     cgraph_inline_failed_string (e->inline_failed));
}
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report,
		   bool disregard_limits = false)
{
  bool inlinable = true;
  enum availability avail;
  struct cgraph_node *callee
    = cgraph_function_or_thunk_node (e->callee, &avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
  struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->decl);
  struct function *callee_cfun
    = callee ? DECL_STRUCT_FUNCTION (callee->decl) : NULL;

  if (!caller_cfun && e->caller->clone_of)
    caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->decl);

  if (!callee_cfun && callee && callee->clone_of)
    callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->decl);

  gcc_assert (e->inline_failed);

  if (!callee || !callee->definition)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (callee)->inlinable
	   || (caller_cfun && fn_contains_cilk_spawn_p (caller_cfun)))
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      inlinable = false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
	e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	   && DECL_FUNCTION_PERSONALITY (callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->decl)
	       != DECL_FUNCTION_PERSONALITY (callee->decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->decl)
	   && !is_tm_pure (e->caller->decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
	   && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->decl,
						callee->decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	   && !disregard_limits
	   && !lookup_attribute ("flatten",
				 DECL_ATTRIBUTES
				   (e->caller->global.inlined_to
				    ? e->caller->global.inlined_to->decl
				    : e->caller->decl))
	   && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
	= TREE_OPTIMIZATION ((caller_tree)
			     ? caller_tree
			     : optimization_default_node);

      struct cl_optimization *callee_opt
	= TREE_OPTIMIZATION ((callee_tree)
			     ? callee_tree
			     : optimization_default_node);

      if (((caller_opt->x_optimize > callee_opt->x_optimize)
	   || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
	  /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
	  && !DECL_DISREGARD_INLINE_LIMITS (e->callee->decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}
/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
							      NULL);
  /* Early inliner might get called at WPA stage when IPA pass adds new
     function.  In this case we cannot really do any of early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In early inliner some of callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
    {
      if (dump_file)
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}
/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
      num++;
  return num;
}
/* Return true if we are interested in inlining the small function E.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
	;
      else if (!cgraph_maybe_hot_edge_p (e)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     xstrdup (e->caller->name ()), e->caller->order,
		     xstrdup (callee->name ()), callee->order,
		     growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     xstrdup (e->caller->name ()), e->caller->order,
		     xstrdup (callee->name ()), callee->order,
		     growth);
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns "
		     "divided by number of calls\n",
		     xstrdup (e->caller->name ()), e->caller->order,
		     xstrdup (callee->name ()), callee->order,
		     growth);
	  want_inline = false;
	}
    }
  return want_inline;
}
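/* Worked example of the last test above (sketch only, assuming the
   illustrative value --param early-inlining-insns=11): a callee with
   estimated edge growth 4 that itself contains 3 non-builtin calls is
   scaled to 4 * (3 + 1) = 16, which exceeds 11, so the early inliner
   rejects it even though the raw growth alone would have passed.  */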
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

inline gcov_type
compute_uninlined_call_time (struct inline_summary *callee_info,
			     struct cgraph_edge *edge)
{
  gcov_type uninlined_call_time =
    RDIV ((gcov_type)callee_info->time * MAX (edge->frequency, 1),
	  CGRAPH_FREQ_BASE);
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
					  ? edge->caller->global.inlined_to
					  : edge->caller)->time;
  return uninlined_call_time + caller_time;
}
/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline gcov_type
compute_inlined_call_time (struct cgraph_edge *edge,
			   int edge_time)
{
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
					  ? edge->caller->global.inlined_to
					  : edge->caller)->time;
  gcov_type time = (caller_time
		    + RDIV (((gcov_type) edge_time
			     - inline_edge_summary (edge)->call_stmt_time)
			    * MAX (edge->frequency, 1), CGRAPH_FREQ_BASE));
  /* Possible one roundoff error, but watch for overflows.  */
  gcc_checking_assert (time >= INT_MIN / 2);
  return time;
}
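/* Worked example with illustrative numbers: if the caller's summary time
   is 1000, the callee's time is 40, the call statement itself costs 2,
   the estimated post-inlining edge time is 30, and edge->frequency equals
   CGRAPH_FREQ_BASE, then the uninlined time is 1000 + 40 = 1040 while the
   inlined time is 1000 + (30 - 2) = 1028.  That difference is what
   big_speedup_p and relative_time_benefit below measure.  */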
/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  gcov_type time = compute_uninlined_call_time (inline_summary (e->callee),
						e);
  gcov_type inlined_time = compute_inlined_call_time (e,
						      estimate_edge_time (e));
  if (time - inlined_time
      > RDIV (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP), 100))
    return true;
  return false;
}
/* Return true if we are interested in inlining the small function E.
   When REPORT is true, report reason to dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
	;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
	 hints suggest that inlining the given function is very
	 profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE
	       && !big_speedup
	       && !(hints & (INLINE_HINT_indirect_call
			     | INLINE_HINT_loop_iterations
			     | INLINE_HINT_array_index
			     | INLINE_HINT_loop_stride)))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      /* Before giving up based on the fact that caller size will grow,
	 allow functions that are called few times and eliminating the
	 offline copy will lead to overall code size reduction.
	 Not all of these will be handled by subsequent inlining of functions
	 called once: in particular weak functions are not handled or
	 functions that inline to multiple calls but a lot of bodies is
	 optimized out.  Finally we want to inline earlier to allow inlining
	 of callbacks.

	 This is slightly wrong on the aggressive side: it is entirely
	 possible that a function is called many times with a context where
	 inlining reduces code size and few times with a context where
	 inlining increases code size.  The resulting growth estimate will be
	 negative even if it would make more sense to keep the offline copy
	 and not inline into the call sites that make the code size grow.

	 When badness orders the calls in a way that code reducing calls come
	 first, this situation is not a problem at all: after inlining all
	 "good" calls, we will realize that keeping the function around is
	 better.  */
      else if (growth <= MAX_INLINE_INSNS_SINGLE
	       /* Unlike for functions called once, we play unsafe with
		  COMDATs.  We can allow that since we know functions
		  in consideration are small (and thus risk is small) and
		  moreover the grow estimates already account that COMDAT
		  functions may or may not disappear when eliminated from
		  the current unit.  With good probability making the
		  aggressive choice in all units is going to make the overall
		  program smaller.

		  Consequently we ask cgraph_can_remove_if_no_direct_calls_p
		  instead of
		  cgraph_will_be_removed_from_program_if_no_direct_calls  */
	       && !DECL_EXTERNAL (callee->decl)
	       && cgraph_can_remove_if_no_direct_calls_p (callee)
	       && estimate_growth (callee) <= 0)
	;
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !flag_inline_functions)
	{
	  e->inline_failed = CIF_NOT_DECLARED_INLINED;
	  want_inline = false;
	}
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
	 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
	 inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !big_speedup
	       && growth >= ((hints & (INLINE_HINT_indirect_call
				       | INLINE_HINT_loop_iterations
				       | INLINE_HINT_array_index
				       | INLINE_HINT_loop_stride))
			     ? MAX (MAX_INLINE_INSNS_AUTO,
				    MAX_INLINE_INSNS_SINGLE)
			     : MAX_INLINE_INSNS_AUTO))
	{
	  e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  want_inline = false;
	}
      /* If call is cold, do not inline when function body would grow.  */
      else if (!cgraph_maybe_hot_edge_p (e))
	{
	  e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
{
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!cgraph_maybe_hot_edge_p (edge))
    {
      reason = "recursive call is cold";
      want_inline = false;
    }
  else if (max_count && !outer_node->count)
    {
      reason = "not executed in profile";
      want_inline = false;
    }
  else if (depth > max_depth)
    {
      reason = "--param max-inline-recursive-depth exceeded.";
      want_inline = false;
    }

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

  if (!want_inline)
    ;
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
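      /* Illustration of the bound above: with max_depth == 8, max_prob
	 starts at roughly CGRAPH_FREQ_BASE * 7/8, i.e. the recursion
	 probability p must stay below 7/8 so that the expected recursion
	 depth 1 / (1 - p) stays at or below 8.  At each deeper peeling
	 level the bound is squared, making further peeling progressively
	 harder to justify.  */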
      if (max_count
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	      >= max_prob))
	{
	  reason = "profile of recursive call is too large";
	  want_inline = false;
	}
      if (!max_count
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	      >= max_prob))
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     recursion depth is large.  We reduce function call overhead and increase
     chances that things fit in the hardware return predictor.

     Recursive inlining might however increase cost of stack frame setup
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is low.

     Recursive inlining of a self recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "profile of recursive call is too small";
	  want_inline = false;
	}
      else if (!max_count
	       && (edge->frequency * 100 / caller_freq
		   <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_callers (struct cgraph_node *node, void *has_hot_call)
{
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!can_inline_edge_p (e, true))
	return true;
      if (!*(bool *)has_hot_call && cgraph_maybe_hot_edge_p (e))
	*(bool *)has_hot_call = true;
    }
  return false;
}
/* If NODE has a caller, return true.  */

static bool
has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  if (node->callers)
    return true;
  return false;
}
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
  struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
  bool has_hot_call = false;

  /* Does it have callers?  */
  if (!cgraph_for_node_and_aliases (node, has_caller_p, NULL, true))
    return false;
  /* Already inlined?  */
  if (function->global.inlined_to)
    return false;
  if (cgraph_function_or_thunk_node (node, NULL) != node)
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (cgraph_for_node_and_aliases (node, check_callers, &has_hot_call, true))
    return false;
  if (!cold && !has_hot_call)
    return false;
  return true;
}
#define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)

/* Return relative time improvement for inlining EDGE in range
   1...RELATIVE_TIME_BENEFIT_RANGE.  */

static inline int
relative_time_benefit (struct inline_summary *callee_info,
		       struct cgraph_edge *edge,
		       int edge_time)
{
  gcov_type relbenefit;
  gcov_type uninlined_call_time
    = compute_uninlined_call_time (callee_info, edge);
  gcov_type inlined_call_time = compute_inlined_call_time (edge, edge_time);

  /* Inlining into an extern inline function is not a win.  */
  if (DECL_EXTERNAL (edge->caller->global.inlined_to
		     ? edge->caller->global.inlined_to->decl
		     : edge->caller->decl))
    return 1;

  /* Watch overflows.  */
  gcc_checking_assert (uninlined_call_time >= 0);
  gcc_checking_assert (inlined_call_time >= 0);
  gcc_checking_assert (uninlined_call_time >= inlined_call_time);

  /* Compute relative time benefit, i.e. how much the call becomes faster.
     ??? perhaps computing how much the caller+callee together become faster
     would lead to more realistic results.  */
  if (!uninlined_call_time)
    uninlined_call_time = 1;
  relbenefit =
    RDIV (((gcov_type)uninlined_call_time - inlined_call_time)
	  * RELATIVE_TIME_BENEFIT_RANGE,
	  uninlined_call_time);
  relbenefit = MIN (relbenefit, RELATIVE_TIME_BENEFIT_RANGE);
  gcc_checking_assert (relbenefit >= 0);
  relbenefit = MAX (relbenefit, 1);
  return relbenefit;
}
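/* Continuing the illustrative example from compute_inlined_call_time:
   with uninlined_call_time == 1040 and inlined_call_time == 1028,
   relbenefit is (1040 - 1028) * RELATIVE_TIME_BENEFIT_RANGE / 1040,
   i.e. roughly 1.2% of the range; the clamping to
   1...RELATIVE_TIME_BENEFIT_RANGE only matters at the extremes.  */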
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable
   callers of the function or function body size.  */

static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, edge_time;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
							      NULL);
  struct inline_summary *callee_info = inline_summary (callee);
  inline_hints hints;

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
	       xstrdup (edge->caller->name ()),
	       edge->caller->order,
	       xstrdup (callee->name ()),
	       edge->callee->order);
      fprintf (dump_file, "      size growth %i, time %i ",
	       growth,
	       edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN / 2 + growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i <= 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, compute badness as:

		relative_edge_count * relative_time_benefit
     goodness = -------------------------------------------
		growth_f_caller
     badness = -goodness

     The fraction is upside down, because on edge counts and time benefits
     the bounds are known.  Edge growth is essentially unlimited.  */

  else if (max_count)
    {
      sreal tmp, relbenefit_real, growth_real;
      int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
      /* Capping edge->count to max_count.  edge->count can be larger than
	 max_count if an inline adds new edges which increase max_count
	 after max_count is computed.  */
      gcov_type edge_count = edge->count > max_count ? max_count : edge->count;

      sreal_init (&relbenefit_real, relbenefit, 0);
      sreal_init (&growth_real, growth, 0);

      /* relative_edge_count.  */
      sreal_init (&tmp, edge_count, 0);
      sreal_div (&tmp, &tmp, &max_count_real);

      /* relative_time_benefit.  */
      sreal_mul (&tmp, &tmp, &relbenefit_real);
      sreal_div (&tmp, &tmp, &max_relbenefit_real);

      /* growth_f_caller.  */
      sreal_mul (&tmp, &tmp, &half_int_min_real);
      sreal_div (&tmp, &tmp, &growth_real);

      badness = -1 * sreal_to_int (&tmp);

      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f%s"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge_count / max_count,
		   edge->count > max_count ? " (capped to max_count)" : "",
		   relbenefit * 100.0 / RELATIVE_TIME_BENEFIT_RANGE);
	}
    }

  /* When function local profile is available, compute badness as:

		 relative_time_benefit
     goodness = ---------------------------------
		 growth_of_caller * overall_growth

     badness = - goodness

     compensated by the inline hints.  */
  else if (flag_guess_branch_prob)
    {
      badness = (relative_time_benefit (callee_info, edge, edge_time)
		 * (INT_MIN / 16 / RELATIVE_TIME_BENEFIT_RANGE));
      badness /= (MIN (65536 / 2, growth)
		  * MIN (65536 / 2, MAX (1, callee_info->growth)));
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 16);
      if ((hints & (INLINE_HINT_indirect_call
		    | INLINE_HINT_loop_iterations
		    | INLINE_HINT_array_index
		    | INLINE_HINT_loop_stride))
	  || callee_info->growth <= 0)
	badness *= 8;
      if (hints & (INLINE_HINT_same_scc))
	badness /= 16;
      else if (hints & (INLINE_HINT_in_scc))
	badness /= 8;
      else if (hints & (INLINE_HINT_cross_module))
	badness /= 2;
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 2);
      if ((hints & INLINE_HINT_declared_inline) && badness >= INT_MIN / 32)
	badness *= 16;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %f,"
		   " benefit %f%%, time w/o inlining %i, time w inlining %i"
		   " overall growth %i (current) %i (original)\n",
		   (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
		   relative_time_benefit (callee_info, edge, edge_time) * 100.0
		   / RELATIVE_TIME_BENEFIT_RANGE,
		   (int)compute_uninlined_call_time (callee_info, edge),
		   (int)compute_inlined_call_time (edge, edge_time),
		   estimate_growth (callee),
		   callee_info->growth);
	}
    }
  /* When function local profile is not available or it does not give
     useful information (ie frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}
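/* To get a feel for the guessed-profile formula above (illustrative
   numbers only): an edge whose relative_time_benefit is 1% of the range,
   with growth 10 and callee overall growth 40, yields a badness of
   roughly (0.01 * INT_MIN / 16) / 400; a hint such as
   INLINE_HINT_loop_iterations then multiplies that by 8, moving the edge
   well ahead of unhinted edges of similar size in the heap.  */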
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key can only decrease the keys.
	 When we increase the key we do not update the heap
	 and instead re-insert the element once it becomes
	 a minimum of the heap.  */
      if (badness < n->key)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
		       xstrdup (edge->caller->name ()),
		       edge->caller->order,
		       xstrdup (edge->callee->name ()),
		       edge->callee->order,
		       (int)n->key,
		       badness);
	    }
	  fibheap_replace_key (heap, n, badness);
	  gcc_checking_assert (n->key == badness);
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
		   "  enqueuing call %s/%i -> %s/%i, badness %i\n",
		   xstrdup (edge->caller->name ()),
		   edge->caller->order,
		   xstrdup (edge->callee->name ()),
		   edge->callee->order,
		   badness);
	}
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  int i;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  /* WHERE body size has changed, the cached growth is invalid.  */
  reset_node_growth_cache (where);

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);
  for (i = 0; ipa_ref_list_referring_iterate (&where->ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      reset_edge_caches (ipa_ref_referring_node (ref));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  reset_edge_growth_cache (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES track nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  int i;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summary (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  for (i = 0; ipa_ref_list_referring_iterate (&node->ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      {
	struct cgraph_node *alias = ipa_ref_referring_node (ref);
	update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
      }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		fibheap_delete_node (heap, (fibnode_t) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	/* We do not reset the callee growth cache here.  Since we added a
	   new call, growth should have just increased and consequently the
	   badness metric doesn't need updating.  */
	if (e->inline_failed
	    && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
	    && inline_summary (callee)->inlinable
	    && avail >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, callee->uid))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false))
	      update_edge_key (heap, e);
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		fibheap_delete_node (heap, (fibnode_t) e->aux);
		e->aux = NULL;
	      }
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (cgraph_function_or_thunk_node (e->callee, &avail) == node
	    && avail > AVAIL_OVERWRITABLE))
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  */
	fibheap_insert (heap,
			!max_count ? -e->frequency
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
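	/* The scaling of e->count above is an overflow guard: dividing by
	   ceil (max_count / 2^24) maps even 64-bit gcov counters into
	   roughly 24 bits, which should fit the heap's integer keys even
	   on hosts where they are 32 bits wide; frequencies are already
	   small enough to negate directly.  */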
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge_p> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  fibheap_t heap;
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that function is small enough to be considered for inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     node->name ());

  /* Do the inlining and update list of recursive calls during process.  */
  while (!fibheap_empty (heap))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started modifying
	 the function.  Be sure to redirect the edge to the original body
	 before estimating growths, otherwise we will be seeing growths
	 after inlining the already modified body.  */
      if (master_clone)
	{
	  cgraph_redirect_edge_callee (curr, master_clone);
	  reset_edge_growth_cache (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  break;
	}

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl
	    == cgraph_function_or_thunk_node (curr->callee, NULL)->decl)
	  depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double)curr->count / node->count);
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need original clone to copy around.  */
	  master_clone = cgraph_clone_node (node, node->decl,
					    node->count, CGRAPH_FREQ_BASE,
					    false, vNULL, true, NULL);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
	  cgraph_redirect_edge_callee (curr, master_clone);
	  reset_edge_growth_cache (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summary (master_clone)->size, inline_summary (node)->size,
	     inline_summary (master_clone)->time, inline_summary (node)->time);

  /* Remove master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone so we don't need recursive calls to cgraph_remove_node.  */
  for (node = cgraph_first_function (); node != master_clone;
       node = next)
    {
      next = cgraph_next_function (node);
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  return true;
}
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
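/* Example, using illustrative default parameter values of
   --param large-unit-insns=10000 and --param inline-unit-growth=30:
   a unit of 4000 insns is first rounded up to the 10000 floor and may
   then grow to 10000 * 130 / 100 = 13000 insns, while a unit of
   40000 insns may grow to 52000.  */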
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, vec<cgraph_edge_p> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
    }
}
/* Remove EDGE from the fibheap.  */

static void
heap_edge_removal_hook (struct cgraph_edge *e, void *data)
{
  if (e->callee)
    reset_node_growth_cache (e->callee);
  if (e->aux)
    {
      fibheap_delete_node ((fibheap_t)data, (fibnode_t)e->aux);
      e->aux = NULL;
    }
}
/* Return true if speculation of edge E seems useful.
   If ANTICIPATE_INLINING is true, be conservative and hope that E
   may get inlined.  */

bool
speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
{
  enum availability avail;
  struct cgraph_node *target = cgraph_function_or_thunk_node (e->callee,
							      &avail);
  struct cgraph_edge *direct, *indirect;
  struct ipa_ref *ref;

  gcc_assert (e->speculative && !e->indirect_unknown_callee);

  if (!cgraph_maybe_hot_edge_p (e))
    return false;

  /* See if IP optimizations found something potentially useful about the
     function.  For now we look only for CONST/PURE flags.  Almost everything
     else we propagate is useless.  */
  if (avail >= AVAIL_AVAILABLE)
    {
      int ecf_flags = flags_from_decl_or_type (target->decl);
      if (ecf_flags & ECF_CONST)
	{
	  cgraph_speculative_call_info (e, direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
	    return true;
	}
      else if (ecf_flags & ECF_PURE)
	{
	  cgraph_speculative_call_info (e, direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
	    return true;
	}
    }
  /* If we did not manage to inline the function nor redirect
     to an ipa-cp clone (that are seen by having the local flag set),
     it is probably pointless to inline it unless the hardware is missing
     an indirect call predictor.  */
  if (!anticipate_inlining && e->inline_failed && !target->local.local)
    return false;
  /* For overwritable targets there is not much to do.  */
  if (e->inline_failed && !can_inline_edge_p (e, false, true))
    return false;
  /* OK, speculation seems interesting.  */
  return true;
}
/* We know that EDGE is not going to be inlined.
   See if we can remove speculation.  */

static void
resolve_noninline_speculation (fibheap_t edge_heap, struct cgraph_edge *edge)
{
  if (edge->speculative && !speculation_useful_p (edge, false))
    {
      struct cgraph_node *node = edge->caller;
      struct cgraph_node *where = node->global.inlined_to
				  ? node->global.inlined_to : node;
      bitmap updated_nodes = BITMAP_ALLOC (NULL);

      cgraph_resolve_speculation (edge, NULL);
      reset_edge_caches (where);
      inline_update_overall_summary (where);
      update_caller_keys (edge_heap, where,
			  updated_nodes, NULL);
      update_callee_keys (edge_heap, where,
			  updated_nodes);
      BITMAP_FREE (updated_nodes);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */

static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t edge_heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  vec<cgraph_edge_p> new_indirect_edges = vNULL;
  int initial_size = 0;
  struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  struct cgraph_edge_hook_list *edge_removal_hook_holder;

  if (flag_indirect_inlining)
    new_indirect_edges.create (8);

  edge_removal_hook_holder
    = cgraph_add_edge_removal_hook (&heap_edge_removal_hook, edge_heap);

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = 0;
  ipa_reduced_postorder (order, true, true, NULL);
  free (order);

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (cgraph_function_with_gimple_body_p (node)
	    || node->thunk.thunk_p)
	  {
	    struct inline_summary *info = inline_summary (node);
	    struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;

	    if (!DECL_EXTERNAL (node->decl))
	      initial_size += info->size;
	    info->growth = estimate_growth (node);
	    if (dfs && dfs->next_cycle)
	      {
		struct cgraph_node *n2;
		int id = dfs->scc_no + 1;
		for (n2 = node; n2;
		     n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
		  {
		    struct inline_summary *info2 = inline_summary (n2);
		    if (info2->scc_no)
		      break;
		    info2->scc_no = id;
		  }
	      }
	  }

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }
  sreal_init (&max_count_real, max_count, 0);
  sreal_init (&max_relbenefit_real, RELATIVE_TIME_BENEFIT_RANGE, 0);
  sreal_init (&half_int_min_real, INT_MAX / 2, 0);
  ipa_free_postorder_info ();
  initialize_growth_caches ();

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      bool update = false;
      struct cgraph_edge *next;

      if (dump_file)
	fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
		 node->name (), node->order);

      for (edge = node->callees; edge; edge = next)
	{
	  next = edge->next_callee;
	  if (edge->inline_failed
	      && !edge->aux
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true))
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (edge_heap, edge);
	    }
	  if (edge->speculative
	      && !speculation_useful_p (edge, edge->aux != NULL))
	    {
	      cgraph_resolve_speculation (edge, NULL);
	      update = true;
	    }
	}
      if (update)
	{
	  struct cgraph_node *where = node->global.inlined_to
				      ? node->global.inlined_to : node;
	  inline_update_overall_summary (where);
	  reset_node_growth_cache (where);
	  reset_edge_caches (where);
	  update_caller_keys (edge_heap, where,
			      updated_nodes, NULL);
	  bitmap_clear (updated_nodes);
	}
    }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!fibheap_empty (edge_heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (edge_heap);
      int current_badness;
      int cached_badness;
      int growth;

      edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* Be sure that caches are maintained consistent.
	 We cannot make this ENABLE_CHECKING only because it would cause
	 different updates of the fibheap queue.  */
      cached_badness = edge_badness (edge, false);
      reset_edge_growth_cache (edge);
      reset_node_growth_cache (edge->callee);

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out of date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (cached_badness == current_badness);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (edge_heap, current_badness, edge);
	  continue;
	}

      if (!can_inline_edge_p (edge, true))
	{
	  resolve_noninline_speculation (edge_heap, edge);
	  continue;
	}

      callee = cgraph_function_or_thunk_node (edge->callee, NULL);
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s/%i with %i size\n",
		   callee->name (), callee->order,
		   inline_summary (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s/%i in %s:%i\n"
		   " Estimated growth after inlined into all is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   edge->caller->name (), edge->caller->order,
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  resolve_noninline_speculation (edge_heap, edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	{
	  resolve_noninline_speculation (edge_heap, edge);
	  continue;
	}

      /* Heuristics for inlining small functions work poorly for
	 recursive calls where we do an effect similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision to
	 the specific inliner.  */
      if (cgraph_edge_recursive_p (edge))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      resolve_noninline_speculation (edge_heap, edge);
	      continue;
	    }
	  reset_edge_caches (where);
	  /* Recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);
	  update_callee_keys (edge_heap, where, updated_nodes);
	  bitmap_clear (updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     leads to an effect similar to loop peeling and we might
	     completely optimize out the recursive call.  However we must be
	     extra selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      resolve_noninline_speculation (edge_heap, edge);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee);
	  reset_node_growth_cache (callee);

	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as number of inlinable calls and size of the function body.
	 After inlining these properties might change for the function we
	 inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (edge_heap, where, updated_nodes, NULL);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has time %i and size %i,"
		   "net change of %+i.\n",
		   edge->caller->name (),
		   inline_summary (edge->caller)->time,
		   inline_summary (edge->caller)->size,
		   overall_size - old_size);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }

  free_growth_caches ();
  new_indirect_edges.release ();
  fibheap_delete (edge_heap);
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100: 0);
  BITMAP_FREE (updated_nodes);
  cgraph_remove_edge_removal_hook (edge_removal_hook_holder);
}
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */
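/* For reference, this implements the user-visible semantics of
   __attribute__ ((flatten)).  In an illustrative example such as

     __attribute__ ((flatten)) int f (int x) { return g (x) + h (x); }

   every call in the body of f (and, recursively, in the inlined bodies)
   is inlined into f whenever the individual edge is inlinable, without
   regard to the usual growth limits (see the flatten check in
   can_inline_edge_p above).  */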
static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
								  NULL);

      /* We've hit a cycle?  It is time to give up.  */
      if (callee->aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     xstrdup (callee->name ()),
		     xstrdup (e->caller->name ()));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early);
	  continue;
	}

      /* Flatten attribute needs to be processed during late inlining.  For
	 extra code quality we however do flattening during early
	 optimization, too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
	 recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (callee->name ()),
		 xstrdup (e->caller->name ()));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->aux = NULL;
    }

  node->aux = NULL;
  if (!node->global.inlined_to)
    inline_update_overall_summary (node);
}
/* Count number of callers of NODE and store it into DATA (that
   points to int).  Worker for cgraph_for_node_and_aliases.  */

static bool
sum_callers (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  int *num_calls = (int *)data;

  for (e = node->callers; e; e = e->next_caller)
    (*num_calls)++;
  return false;
}
/* Inline NODE to all callers.  Worker for cgraph_for_node_and_aliases.
   DATA points to the number of calls originally found so we avoid infinite
   recursion.  */

static bool
inline_to_all_callers (struct cgraph_node *node, void *data)
{
  int *num_calls = (int *)data;
  while (node->callers && !node->global.inlined_to)
    {
      struct cgraph_node *caller = node->callers->caller;

      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nInlining %s size %i.\n",
		   node->name (),
		   inline_summary (node)->size);
	  fprintf (dump_file,
		   " Called once from %s %i insns.\n",
		   node->callers->caller->name (),
		   inline_summary (node->callers->caller)->size);
	}

      inline_call (node->callers, true, NULL, NULL, true);
      if (dump_file)
	fprintf (dump_file,
		 " Inlined into %s which now has %i size\n",
		 caller->name (),
		 inline_summary (caller)->size);
      if (!(*num_calls)--)
	{
	  if (dump_file)
	    fprintf (dump_file, "New calls found; giving up.\n");
	  return false;
	}
    }
  return false;
}

/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order;
  int i;
  int cold;
  bool remove_functions = false;

  if (!optimize)
    return 0;

  order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);

  if (in_lto_p && optimize)
    ipa_update_after_lto_read ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  FOR_EACH_FUNCTION (node)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we would stop inlining at the
	 entry of cycles, possibly cloning that entry point and trying
	 to flatten the cycle by turning it into a self-recursive
	 function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", node->name ());
	  flatten_function (node, false);
	}
    }

  inline_small_functions ();

  /* Do the first after-inlining removal.  We want to remove all "stale"
     extern inline functions and virtual functions so we really know what
     is called once.  */
  symtab_remove_unreachable_nodes (false, dump_file);
  free (order);

  /* Inline functions with a property that after inlining into all callers
     the code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth
     limits are met.  */
  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on functions to be inlined into all callers and "
	     "removing useless speculations:\n");

  /* Inlining one function called once has a good chance of preventing
     inlining another function into the same callee.  Ideally we should
     work in priority order, but probably inlining hot functions first
     is a good cut without the extra pain of maintaining the queue.

     ??? This is not really fitting the bill perfectly: inlining a
     function into a callee often leads to better optimization of the
     callee due to increased context for optimization.
     For example if main() calls a function that outputs help
     and then a function that does the main optimization, we should
     inline the second with priority even if both calls are cold by
     themselves.

     We probably want to implement a new predicate replacing our use of
     maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
     to be hot.  */
  for (cold = 0; cold <= 1; cold++)
    {
      FOR_EACH_DEFINED_FUNCTION (node)
	{
	  struct cgraph_edge *edge, *next;
	  bool update = false;

	  /* Resolve speculative edges that turned out not to be useful;
	     this may expose more functions called once.  */
	  for (edge = node->callees; edge; edge = next)
	    {
	      next = edge->next_callee;
	      if (edge->speculative && !speculation_useful_p (edge, false))
		{
		  cgraph_resolve_speculation (edge, NULL);
		  update = true;
		  remove_functions = true;
		}
	    }
	  if (update)
	    {
	      struct cgraph_node *where = node->global.inlined_to
					  ? node->global.inlined_to : node;
	      reset_node_growth_cache (where);
	      reset_edge_caches (where);
	      inline_update_overall_summary (where);
	    }
	  if (flag_inline_functions_called_once
	      && want_inline_function_to_all_callers_p (node, cold))
	    {
	      int num_calls = 0;
	      cgraph_for_node_and_aliases (node, sum_callers,
					   &num_calls, true);
	      cgraph_for_node_and_aliases (node, inline_to_all_callers,
					   &num_calls, true);
	      remove_functions = true;
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (optimize)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    {
      fprintf (dump_file,
	       "\nInlined %i calls, eliminated %i functions\n\n",
	       ncalls_inlined, nfunctions_inlined);
      dump_inline_summaries (dump_file);
    }
  /* In WPA we use inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return remove_functions ? TODO_remove_functions : 0;
}
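
/* Illustrative only: the decisions made above are written to the pass
   dump file, so they can be inspected with, e.g.,

     gcc -O2 -fdump-ipa-inline foo.c

   and the called-once logic is controlled by flag_inline_functions_called_once,
   i.e. -finline-functions-called-once.  Flag spellings should be verified
   against the documentation of the release at hand.  */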

/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee
	= cgraph_function_or_thunk_node (e->callee, NULL);
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     e->callee->name ());
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	{
	  /* Set inlined to true if the callee is marked "always_inline" but
	     is not inlinable.  This will allow flagging an error later in
	     expand_call_inline in tree-inline.c.  */
	  if (lookup_attribute ("always_inline",
				DECL_ATTRIBUTES (callee->decl)) != NULL)
	    inlined = true;
	  continue;
	}

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 xstrdup (e->callee->name ()),
		 xstrdup (e->caller->name ()));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    inline_update_overall_summary (node);

  return inlined;
}
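
/* A hedged, user-level sketch of the contract enforced above, with a
   hypothetical name, guarded out of the build.  */
#if 0
/* Inlined into every caller even when not optimizing; were the call
   recursive, the loop above would mark it CIF_RECURSIVE_INLINING
   instead, and a non-inlinable always_inline callee is diagnosed later
   in expand_call_inline.  */
static inline int __attribute__ ((always_inline))
example_always (int x)
{
  return x + 1;
}
#endif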

/* Decide on early inlining of small functions called by NODE.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee
	= cgraph_function_or_thunk_node (e->callee, NULL);
      if (!inline_summary (callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 callee->name ());

      if (!can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (callee->name ()),
		 xstrdup (e->caller->name ()));
      inline_call (e, true, NULL, NULL, true);
      inlined = true;
    }

  return inlined;
}
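
/* A hedged sketch of which callees the loop above considers, with
   hypothetical names, guarded out of the build.  */
#if 0
/* A candidate regardless of -finline-* flags: explicitly declared
   inline.  */
static inline int example_declared_inline (int x) { return x + 1; }

/* Considered only when -finline-small-functions or -finline-functions
   is in effect.  */
static int example_not_declared_inline (int x) { return x - 1; }
#endif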

/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */

static unsigned int
early_inliner (void)
{
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if data structures for the ipa-inliner are already computed.
     This happens when some pass decides to construct a new function and
     cgraph_add_new_function calls lowering passes and early optimization on
     it.  This may confuse us when the early inliner decides to inline a
     call to a function clone, because function clones don't have a
     parameter list in ipa-prop matching their signature.  */
  if (ipa_node_params_vector.exists ())
    return 0;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif
  ipa_remove_all_references (&node->ref_list);

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always inline functions will get less optimized, but at the
	 same time inlining of functions calling always inline
	 functions into an always inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of
	 inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", node->name ());
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the
	     new iteration of the early inliner works as expected.  We
	     however have values approximately right and thus we only need
	     to update edge info that might be cleared out for newly
	     discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	      if (edge->callee->decl
		  && !gimple_check_call_matching_types (
		      edge->call_stmt, edge->callee->decl, false))
		edge->call_stmt_cannot_inline_p = true;
	    }
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
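
/* Illustrative only: the iteration cap used above corresponds to the
   user-tunable parameter early-inliner-max-iterations, e.g.

     gcc -O2 --param early-inliner-max-iterations=3 foo.c

   (spelling per the usual param naming; verify against the release
   documentation).  */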

namespace {

const pass_data pass_data_early_inline =
{
  GIMPLE_PASS, /* type */
  "einline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_EARLY_INLINING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_early_inline : public gimple_opt_pass
{
public:
  pass_early_inline (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_inline, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return early_inliner (); }

}; // class pass_early_inline

} // anon namespace

gimple_opt_pass *
make_pass_early_inline (gcc::context *ctxt)
{
  return new pass_early_inline (ctxt);
}

/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.

   Enable inlining unconditionally, because callgraph redirection
   happens here.  */

static bool
gate_ipa_inline (void)
{
  return true;
}

namespace {

const pass_data pass_data_ipa_inline =
{
  IPA_PASS, /* type */
  "inline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_IPA_INLINING, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  TODO_remove_functions, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_inline : public ipa_opt_pass_d
{
public:
  pass_ipa_inline (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
		      inline_generate_summary, /* generate_summary */
		      inline_write_summary, /* write_summary */
		      inline_read_summary, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      inline_transform, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  bool gate () { return gate_ipa_inline (); }
  unsigned int execute () { return ipa_inline (); }

}; // class pass_ipa_inline

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_inline (gcc::context *ctxt)
{
  return new pass_ipa_inline (ctxt);
}
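
/* Illustrative only: the factories above are meant to be called by the
   pass manager rather than directly; a minimal sketch with a
   hypothetical context object, guarded out of the build.  */
#if 0
  gcc::context *g = new gcc::context ();
  opt_pass *early = make_pass_early_inline (g);
  opt_pass *late = make_pass_ipa_inline (g);
  /* In the real compiler, ownership of the returned pass objects rests
     with the pass manager.  */
#endif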