/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/*  Inlining decision heuristics

    We separate inlining decisions from the inliner itself and store them
    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
    documentation for the particular representation of inline plans in the
    callgraph.

    There are three major parts of this file:

    cgraph_mark_inline implementation

      This function marks the given call inline and performs the necessary
      modifications of the cgraph (production of the clones and updating of
      overall statistics).

    inlining heuristics limits

      These functions check that a particular inlining is allowed by the
      limits specified by the user (allowed function growth, overall unit
      growth and so on).

    inlining heuristics

      This is the implementation of the IPA pass aiming to get as much
      benefit from inlining as possible while obeying the limits checked
      above.

      The implementation of particular heuristics is separated from the
      rest of the code to make it easier to replace it with a more
      complicated implementation in the future.  The rest of the inlining
      code acts as a library aimed to modify the callgraph and verify that
      the parameters on code size growth fit.

      To mark a given call inline, use the cgraph_mark_inline function;
      the verification is performed by cgraph_default_inline_p and
      cgraph_check_inline_limits.

      The heuristics implement a simple knapsack style algorithm ordering
      all functions by their "profitability" (estimated by code size
      growth) and inlining them in priority order.  (A minimal
      illustrative sketch of this loop follows this comment.)

      cgraph_decide_inlining implements the heuristics taking the whole
      callgraph into account, while cgraph_decide_inlining_incrementally
      considers only one function at a time and is used by the early
      inliner.

    The inliner itself is split into several passes:

    pass_inline_parameters

      This pass computes local properties of functions that are used by
      the inliner: estimated function body size, whether the function is
      inlinable at all, and stack frame consumption.

      Before executing any of the inliner passes, this local pass has to
      be applied to each function in the callgraph (i.e. run as a subpass
      of some earlier IPA pass).  The results are made out of date by any
      optimization applied afterwards.

    pass_early_inlining

      Simple local inlining pass inlining callees into the current
      function.  This pass makes no global whole compilation unit
      analysis, and thus, when allowed to do inlining expanding code size,
      it might result in unbounded growth of the whole unit.

      The pass is run during conversion into SSA form.  Only functions
      already converted into SSA form are inlined, so the conversion must
      happen in topological order on the callgraph (that is maintained by
      the pass manager).  The functions after inlining are early
      optimized, so the early inliner sees the unoptimized function
      itself, but all considered callees are already optimized, allowing
      it to unfold abstraction penalty on C++ effectively and cheaply.

    pass_ipa_early_inlining

      With profiling, the early inlining is also necessary to reduce
      instrumentation costs on programs with high abstraction penalty
      (doing many redundant calls).  This can't happen in parallel with
      early optimization and profile instrumentation, because we would end
      up re-instrumenting already instrumented function bodies we brought
      in via inlining.

      To avoid this, this pass is executed as an IPA pass before
      profiling.  It is a simple wrapper around pass_early_inlining and
      ensures first inlining.

    pass_ipa_inline

      This is the main pass implementing a simple greedy algorithm to do
      inlining of small functions that results in overall growth of the
      compilation unit, and inlining of functions called once.  The pass
      computes just the so-called inline plan (the representation of the
      inlining to be done in the callgraph) and, unlike early inlining, it
      does not perform the inlining itself.

    pass_apply_inline

      This pass performs the actual inlining according to pass_ipa_inline
      on a given function.  Possibly the function body before inlining is
      saved when it is needed for further inlining later.  */
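/* The following is a minimal, self-contained sketch (not part of GCC and
   compiled out via #if 0) of the knapsack-style priority ordering
   described above: all candidate edges sit in a priority queue keyed by
   badness, and the best candidate is repeatedly extracted while the unit
   stays within its growth budget.  The real implementation uses a
   fibonacci heap (see fibheap.h) and the metric from cgraph_edge_badness;
   the plain array scan and the numbers here are invented stand-ins.  */
#if 0
#include <stdio.h>

struct candidate { const char *name; int badness; int growth; };

static void
greedy_inline_sketch (void)
{
  struct candidate cand[] = { {"a", -5, -2}, {"b", 10, 4}, {"c", 3, 1} };
  int ncand = 3;
  int unit_size = 100;
  int budget = unit_size * 130 / 100;   /* cf. compute_max_insns below.  */

  while (ncand > 0 && unit_size <= budget)
    {
      int best = 0, i;

      /* Stand-in for fibheap_extract_min: pick the smallest badness.  */
      for (i = 1; i < ncand; i++)
        if (cand[i].badness < cand[best].badness)
          best = i;
      printf ("inlining %s, unit grows by %i insns\n",
              cand[best].name, cand[best].growth);
      unit_size += cand[best].growth;
      cand[best] = cand[--ncand];       /* Remove the candidate.  */
    }
}
#endif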
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"
/* Mode the incremental inliner operates on:

   In ALWAYS_INLINE mode, only functions marked with the always_inline
   attribute are inlined.  This mode is used after detecting a cycle
   during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE,
  INLINE_ALL
};

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *,
				      enum inlining_mode, int);
/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_insns;
static gcov_type max_count;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;
static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}
/* Estimate size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
				     struct cgraph_node *what)
{
  int size;
  tree fndecl = what->decl, arg;
  int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);

  for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
    call_insns += estimate_move_cost (TREE_TYPE (arg));
  size = (what->global.insns - call_insns) * times + to->global.insns;
  gcc_assert (size >= 0);
  return size;
}
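/* Worked example with invented numbers: with PARAM_INLINE_CALL_COST of
   16, a WHAT of 30 insns taking two arguments with an estimated move
   cost of 1 each, and a TO of 100 insns, we get call_insns = 16 + 1 + 1
   = 18, so a single inlining yields (30 - 18) * 1 + 100 = 112 insns,
   i.e. a net growth of 12 insns for TO.  */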
/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the
   out-of-line function call.  */

static void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
			    bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
	 In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
	  && !e->callee->needed
	  && !cgraph_new_nodes)
	{
	  gcc_assert (!e->callee->global.inlined_to);
	  if (e->callee->analyzed)
	    overall_insns -= e->callee->global.insns, nfunctions_inlined++;
	  duplicate = false;
	}
      else
	{
	  struct cgraph_node *n;
	  n = cgraph_clone_node (e->callee, e->count, e->frequency,
				 e->loop_nest, update_original);
	  cgraph_redirect_edge_callee (e, n);
	}
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
      + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}
/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
			 VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_insns = 0, new_insns = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;

  if (e->callee->inline_decl)
    cgraph_redirect_edge_callee (e, cgraph_node (e->callee->inline_decl));

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;

  if (!e->callee->global.inlined)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  /* Now update size of caller and all functions caller is inlined into.  */
  for (;e && !e->inline_failed; e = e->caller->callers)
    {
      old_insns = e->caller->global.insns;
      new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
						       what);
      gcc_assert (new_insns >= 0);
      to = e->caller;
      to->global.insns = new_insns;
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_insns > old_insns)
    overall_insns += new_insns - old_insns;
  ncalls_inlined++;

  if (flag_indirect_inlining)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}
/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
	{
	  cgraph_mark_inline_edge (e, true, NULL);
	  if (e == edge)
	    edge = next;
	}
    }

  return edge;
}
/* Estimate the growth caused by inlining NODE into all callees.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
	self_recursive = true;
      if (e->inline_failed)
	growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
		   - e->caller->global.insns);
    }

  /* ??? Wrong for non-trivially self recursive functions or cases where
     we decide to not inline for different reasons, but it is not a big
     deal, as in that case we will keep the body around, but we will also
     avoid some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.insns;

  node->global.estimated_growth = growth;
  return growth;
}
/* Return false when inlining WHAT into TO is not a good idea
   as it would cause too large growth of the function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined; otherwise figure out how many call sites in
   TO call WHAT and verify that all can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
			    cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
	times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as base for limiting the growth.  */
  if (inline_summary (to)->self_insns > inline_summary (what)->self_insns)
    limit = inline_summary (to)->self_insns;
  else
    limit = inline_summary (what)->self_insns;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.insns
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
	*reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit
    * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
		   + inline_summary (to)->estimated_self_stack_size
		   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
	*reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
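/* Worked example with invented numbers for the growth limit above: if TO
   has 100 self insns and WHAT has 30, the base is 100; with
   PARAM_LARGE_FUNCTION_GROWTH at 100 (percent), LIMIT becomes 200.  A
   NEWSIZE of 180 passes, while a NEWSIZE of 250 is rejected only if it
   also exceeds PARAM_LARGE_FUNCTION_INSNS, so small functions may still
   grow past their percentage limit.  */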
/* Return true when function N is small enough to be inlined.  */

bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (n->inline_decl)
    decl = n->inline_decl;
  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
	*reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }

  if (!DECL_STRUCT_FUNCTION (decl)->cfg)
    {
      if (reason)
	*reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
	{
	  if (reason)
	    *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  return false;
	}
    }
  else
    {
      if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
	{
	  if (reason)
	    *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  return false;
	}
    }

  return true;
}
/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where the same function appears
   more than once in the single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
			     struct cgraph_node *what,
			     cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
	       ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable
   callers of the function or function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  int badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.insns;

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for overall number of "executed" inlined calls.  */
  else if (max_count)
    badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;

  /* When function local profile is available, base priorities on
     growth / frequency, so we optimize for overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within function, the function itself is infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so fewer times called
     functions of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
      int growth =
	cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      growth -= edge->caller->global.insns;
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 256)
	div = 256 + ceil_log2 (div) - 8;
      if (div < 1)
	div = 1;
      if (badness > 0)
	badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
    }
  /* When function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
    }
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}
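/* Worked example of the guessed-profile case above, with invented
   numbers: a growth of 12 insns at a call whose frequency is
   CGRAPH_FREQ_BASE / 2 gives div = 50 and badness = 12 * 256 / 50 = 61
   (before the estimated-growth bias is added), while the same growth at
   a call ten times colder gives div = 5 and badness = 614, so the hotter
   call is inlined first.  */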
/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
	if (edge->aux)
	  {
	    fibheap_delete_node (heap, (fibnode_t) edge->aux);
	    edge->aux = NULL;
	    if (edge->inline_failed)
	      edge->inline_failed = failed_reason;
	  }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	int badness = cgraph_edge_badness (edge);
	if (edge->aux)
	  {
	    fibnode_t n = (fibnode_t) edge->aux;
	    gcc_assert (n->data == edge);
	    if (n->key == badness)
	      continue;

	    /* fibheap_replace_key only increases the keys.  */
	    if (fibheap_replace_key (heap, n, badness))
	      continue;
	    fibheap_delete_node (heap, (fibnode_t) edge->aux);
	  }
	edge->aux = fibheap_insert (heap, badness, edge);
      }
}
/* Recompute heap nodes for the caller edges of each of NODE's callees.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else if (!e->inline_failed)
      update_callee_keys (heap, e->callee, updated_nodes);
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  Without profile feedback we maintain a simple queue
	   to order candidates by recursive depths.  */
	fibheap_insert (heap,
			!max_count ? priority++
			: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
			e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
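/* For example (counts invented): with max_count = 2^30 the divisor
   (max_count + (1<<24) - 1) / (1<<24) is 64, so an edge executed
   1000000 times gets key -15625 while one executed 1000 times gets key
   -15; the hotter recursive call is therefore extracted first.  */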
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
				  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* We need the original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE,
				    1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
	 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
	     <= limit))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->decl == curr->callee->decl)
	  depth++;
      if (depth > max_depth)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "   maximal depth reached\n");
	  continue;
	}

      if (max_count)
	{
	  if (!cgraph_maybe_hot_edge_p (curr))
	    {
	      if (dump_file)
		fprintf (dump_file, "   Not inlining cold call\n");
	      continue;
	    }
	  if (curr->count * 100 / node->count < probability)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "   Probability of edge is too small\n");
	      continue;
	    }
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    {
	      fprintf (dump_file, " called approx. %.2f times per call",
		       (double)curr->count / node->count);
	    }
	  fprintf (dump_file, "\n");
	}
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, body grown from %i to %i insns\n", n,
	     master_clone->global.insns, node->global.insns);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}
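/* As an example of the probability cut-off above (numbers invented):
   with PARAM_MIN_INLINE_RECURSIVE_PROBABILITY at 10, a recursive edge
   executed 5 times per 100 executions of NODE gives 5 * 100 / 100 = 5,
   which is below 10, so it is skipped; an edge taken 40 times per 100
   executions passes the test.  */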
/* Set inline_failed for all callers of given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
			  cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
	     cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
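/* Worked example with invented sizes: a unit of 40000 insns with
   PARAM_INLINE_UNIT_GROWTH at 30 (percent) may grow to 40000 * 130 / 100
   = 52000 insns, while a unit smaller than PARAM_LARGE_UNIT_INSNS is
   first rounded up to that value, giving small units proportionally more
   headroom.  */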
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then we
   update the estimates.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_insns, max_insns;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
	  || node->local.disregard_inline_limits)
	continue;
      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
	{
	  cgraph_set_inline_failed (node, failed_reason);
	  continue;
	}

      for (edge = node->callers; edge; edge = edge->next_caller)
	if (edge->inline_failed)
	  {
	    gcc_assert (!edge->aux);
	    edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
	  }
    }

  max_insns = compute_max_insns (overall_insns);
  min_insns = overall_insns;

  while (overall_insns <= max_insns
	 && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_insns = overall_insns;
      struct cgraph_node *where;
      int growth =
	cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      cgraph_inline_failed_t not_good = CIF_OK;

      growth -= edge->caller->global.insns;

      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i insns\n",
		   cgraph_node_name (edge->callee),
		   edge->callee->global.insns);
	  fprintf (dump_file,
		   " to be inlined into %s\n"
		   " Estimated growth after inlined into all callees is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   cgraph_estimate_growth (edge->callee),
		   cgraph_edge_badness (edge),
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called "HOST_WIDEST_INT_PRINT_DEC"x\n",
		     edge->count);
	}
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* When we do not have profile info ready, we do not weight in any
	 way the position of the call in the procedure itself.  This means
	 that if a call of function A from function B seems profitable to
	 inline, the recursive call of function A in the inline copy of A
	 in B will look profitable too, and we end up inlining until
	 reaching maximal function growth.  This is not a good idea, so
	 prohibit the recursive inlining.

	 ??? When the frequencies are taken into account we might not need
	 this restriction.

	 We need to be careful here, in some testcases, e.g. directives.c
	 in libcpp, we can estimate a self recursive function to have
	 negative growth for inlining completely.  */
      if (!edge->count)
	{
	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->decl == edge->callee->decl)
		break;
	      where = where->callers->caller;
	    }
	  if (where->global.inlined_to)
	    {
	      edge->inline_failed
		= (edge->callee->local.disregard_inline_limits
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      if (dump_file)
		fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
	      continue;
	    }
	}

      if (!cgraph_maybe_hot_edge_p (edge))
	not_good = CIF_UNLIKELY_CALL;
      if (!flag_inline_functions
	  && !DECL_DECLARED_INLINE_P (edge->callee->decl))
	not_good = CIF_NOT_DECLARED_INLINED;
      if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
	not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
	{
	  if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
					    &edge->inline_failed))
	    {
	      edge->inline_failed = not_good;
	      if (dump_file)
		fprintf (dump_file, " inline_failed:%s.\n",
			 cgraph_inline_failed_string (edge->inline_failed));
	    }
	  continue;
	}
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
	{
	  if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
					    &edge->inline_failed))
	    {
	      if (dump_file)
		fprintf (dump_file, " inline_failed:%s.\n",
			 cgraph_inline_failed_string (edge->inline_failed));
	    }
	  continue;
	}
      if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
	{
	  gimple_call_set_cannot_inline (edge->call_stmt, true);
	  edge->inline_failed = CIF_TARGET_OPTION_MISMATCH;
	  if (dump_file)
	    fprintf (dump_file, " inline_failed:%s.\n",
		     cgraph_inline_failed_string (edge->inline_failed));
	  continue;
	}
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
				       &edge->inline_failed))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!cgraph_decide_recursive_inlining (where,
						 flag_indirect_inlining
						 ? &new_indirect_edges : NULL))
	    continue;
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);
	  update_callee_keys (heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *callee;
	  if (gimple_call_cannot_inline_p (edge->call_stmt)
	      || !cgraph_check_inline_limits (edge->caller, edge->callee,
					      &edge->inline_failed, true))
	    {
	      if (dump_file)
		fprintf (dump_file, " Not inlining into %s:%s.\n",
			 cgraph_node_name (edge->caller),
			 cgraph_inline_failed_string (edge->inline_failed));
	      continue;
	    }
	  callee = edge->callee;
	  cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (heap, new_indirect_edges);

	  update_callee_keys (heap, callee, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the
	 function body.  After inlining these properties might change for
	 the function we inlined into (since its body size changed) and
	 for the functions called by the function we inlined (since the
	 number of their inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has %i insns, "
		   "net change of %+i insns.\n",
		   cgraph_node_name (edge->caller),
		   edge->caller->global.insns,
		   overall_insns - old_insns);
	}
      if (min_insns > overall_insns)
	{
	  min_insns = overall_insns;
	  max_insns = compute_max_insns (min_insns);

	  if (dump_file)
	    fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
	}
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
	  && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
					   &edge->inline_failed))
	edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_insns = 0;
  int i;
  int initial_insns = 0;
  bool redo_always_inline = true;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);

  max_count = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed && (node->needed || node->reachable))
      {
	struct cgraph_edge *e;

	initial_insns += inline_summary (node)->self_insns;
	gcc_assert (inline_summary (node)->self_insns == node->global.insns);
	for (e = node->callees; e; e = e->next_callee)
	  if (max_count < e->count)
	    max_count = e->count;
      }
  overall_insns = initial_insns;
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining.  Starting with %i insns.\n",
	     initial_insns);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a priority
     so none of our later choices will make this impossible.  */
  while (redo_always_inline)
    {
      redo_always_inline = false;
      for (i = nnodes - 1; i >= 0; i--)
	{
	  struct cgraph_edge *e, *next;

	  node = order[i];

	  /* Handle nodes to be flattened, but don't update overall unit
	     size.  */
	  if (lookup_attribute ("flatten",
				DECL_ATTRIBUTES (node->decl)) != NULL)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Flattening %s\n", cgraph_node_name (node));
	      cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
	    }

	  if (!node->local.disregard_inline_limits)
	    continue;
	  if (dump_file)
	    fprintf (dump_file,
		     "\nConsidering %s %i insns (always inline)\n",
		     cgraph_node_name (node), node->global.insns);
	  old_insns = overall_insns;
	  for (e = node->callers; e; e = next)
	    {
	      next = e->next_caller;
	      if (!e->inline_failed
		  || gimple_call_cannot_inline_p (e->call_stmt))
		continue;
	      if (cgraph_recursive_inlining_p (e->caller, e->callee,
					       &e->inline_failed))
		continue;
	      if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
		{
		  gimple_call_set_cannot_inline (e->call_stmt, true);
		  continue;
		}
	      if (cgraph_mark_inline_edge (e, true, NULL))
		redo_always_inline = true;
	      if (dump_file)
		fprintf (dump_file,
			 " Inlined into %s which now has %i insns.\n",
			 cgraph_node_name (e->caller),
			 e->caller->global.insns);
	    }
	  /* Inlining self recursive functions might introduce new calls to
	     themselves we didn't see in the loop above.  Fill in the proper
	     reason why inline failed.  */
	  for (e = node->callers; e; e = e->next_caller)
	    if (e->inline_failed)
	      e->inline_failed = CIF_RECURSIVE_INLINING;
	  if (dump_file)
	    fprintf (dump_file,
		     " Inlined for a net change of %+i insns.\n",
		     overall_insns - old_insns);
	}
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
	fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
	{
	  node = order[i];

	  if (node->callers
	      && !node->callers->next_caller
	      && !node->needed
	      && node->local.inlinable
	      && node->callers->inline_failed
	      && !gimple_call_cannot_inline_p (node->callers->call_stmt)
	      && !DECL_EXTERNAL (node->decl)
	      && !DECL_COMDAT (node->decl))
	    {
	      if (dump_file)
		{
		  fprintf (dump_file,
			   "\nConsidering %s %i insns.\n",
			   cgraph_node_name (node), node->global.insns);
		  fprintf (dump_file,
			   " Called once from %s %i insns.\n",
			   cgraph_node_name (node->callers->caller),
			   node->callers->caller->global.insns);
		}

	      old_insns = overall_insns;

	      if (cgraph_check_inline_limits (node->callers->caller, node,
					      NULL, false))
		{
		  cgraph_mark_inline (node->callers);
		  if (dump_file)
		    fprintf (dump_file,
			     " Inlined into %s which now has %i insns"
			     " for a net change of %+i insns.\n",
			     cgraph_node_name (node->callers->caller),
			     node->callers->caller->global.insns,
			     overall_insns - old_insns);
		}
	      else
		{
		  if (dump_file)
		    fprintf (dump_file,
			     " Inline limit reached, not inlined.\n");
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    free_all_ipa_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions, "
	     "%i insns turned to %i insns.\n\n",
	     ncalls_inlined, nfunctions_inlined, initial_insns,
	     overall_insns);
  free (order);
  return 0;
}
/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner and DEPTH is used only for debug output.

   We are detecting cycles by storing the mode of the inliner into
   cgraph_node last time we visited it in the recursion.  In general when
   the mode is set, we have recursive inlining, but as a special case, we
   want to try harder to inline ALWAYS_INLINE functions: consider the
   callgraph a->b->c->b, with 'a' being flatten and 'b' being
   always_inline.  Flattening 'a' will collapse a->b->c before hitting the
   cycle.  To accommodate always inline, we however need to inline
   a->b->c->b.

   So after hitting the cycle for the first time, we switch into
   ALWAYS_INLINE mode and stop inlining only after hitting ALWAYS_INLINE
   in ALWAYS_INLINE mode.  */

static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;
  bool inlined = false;

  /* We've hit a cycle?  */
  if (callee_mode)
    {
      /* It is the first time we see the function, we are not in
	 ALWAYS_INLINE only mode yet, and the function in question is
	 always_inline.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
	{
	  if (dump_file)
	    {
	      indent_to (dump_file, depth);
	      fprintf (dump_file,
		       "Hit cycle in %s, switching to always inline only.\n",
		       cgraph_node_name (callee));
	    }
	  mode = INLINE_ALWAYS_INLINE;
	}
      /* Otherwise it is time to give up.  */
      else
	{
	  if (dump_file)
	    {
	      indent_to (dump_file, depth);
	      fprintf (dump_file,
		       "Not inlining %s into %s to avoid cycle.\n",
		       cgraph_node_name (callee),
		       cgraph_node_name (e->caller));
	    }
	  e->inline_failed = (e->callee->local.disregard_inline_limits
			      ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	  return false;
	}
    }

  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
	       cgraph_node_name (e->callee),
	       cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    {
      cgraph_mark_inline (e);

      /* In order to fully inline always_inline functions, we need to
	 recurse here, since the inlined functions might not be processed
	 by incremental inlining at all yet.

	 Also flattening needs to be done recursively.  */

      if (mode == INLINE_ALL || always_inline)
	cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
      inlined = true;
    }
  callee->aux = (void *)(size_t) callee_mode;
  return inlined;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
				      enum inlining_mode mode,
				      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t)node->aux;

  if (mode != INLINE_ALWAYS_INLINE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
	{
	  indent_to (dump_file, depth);
	  fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
	}
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always inline functions.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->callee->local.disregard_inline_limits
	  && (mode != INLINE_ALL || !e->callee->local.inlinable))
	continue;
      if (gimple_call_cannot_inline_p (e->call_stmt))
	continue;
      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed && mode == INLINE_ALL)
	{
	  inlined |= try_inline (e, mode, depth);
	  continue;
	}
      if (dump_file)
	{
	  indent_to (dump_file, depth);
	  fprintf (dump_file,
		   "Considering to always inline inline candidate %s.\n",
		   cgraph_node_name (e->callee));
	}
      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
	{
	  if (dump_file)
	    {
	      indent_to (dump_file, depth);
	      fprintf (dump_file, "Not inlining: recursive call.\n");
	    }
	  continue;
	}
      if (!tree_can_inline_p (node->decl, e->callee->decl))
	{
	  gimple_call_set_cannot_inline (e->call_stmt, true);
	  if (dump_file)
	    {
	      indent_to (dump_file, depth);
	      fprintf (dump_file,
		       "Not inlining: Target specific option mismatch.\n");
	    }
	  continue;
	}
      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	{
	  if (dump_file)
	    {
	      indent_to (dump_file, depth);
	      fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	    }
	  continue;
	}
      if (!e->callee->analyzed && !e->callee->inline_decl)
	{
	  if (dump_file)
	    {
	      indent_to (dump_file, depth);
	      fprintf (dump_file,
		       "Not inlining: Function body no longer available.\n");
	    }
	  continue;
	}
      inlined |= try_inline (e, mode, depth);
    }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
	if (!e->callee->local.inlinable
	    || !e->inline_failed
	    || e->callee->local.disregard_inline_limits)
	  continue;
	if (dump_file)
	  fprintf (dump_file, "Considering inline candidate %s.\n",
		   cgraph_node_name (e->callee));
	if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
	  {
	    if (dump_file)
	      {
		indent_to (dump_file, depth);
		fprintf (dump_file, "Not inlining: recursive call.\n");
	      }
	    continue;
	  }
	if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
	    != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
	  {
	    if (dump_file)
	      {
		indent_to (dump_file, depth);
		fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	      }
	    continue;
	  }

	/* When the function body would grow and inlining the function
	   won't eliminate the need for an offline copy of the function,
	   don't inline.  */
	if ((mode == INLINE_SIZE
	     || (!flag_inline_functions
		 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
	    && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
		> e->caller->global.insns)
	    && cgraph_estimate_growth (e->callee) > 0)
	  {
	    if (dump_file)
	      {
		indent_to (dump_file, depth);
		fprintf (dump_file,
			 "Not inlining: code size would grow by %i insns.\n",
			 cgraph_estimate_size_after_inlining (1, e->caller,
							      e->callee)
			 - e->caller->global.insns);
	      }
	    continue;
	  }
	if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
					 false)
	    || gimple_call_cannot_inline_p (e->call_stmt))
	  {
	    if (dump_file)
	      {
		indent_to (dump_file, depth);
		fprintf (dump_file, "Not inlining: %s.\n",
			 cgraph_inline_failed_string (e->inline_failed));
	      }
	    continue;
	  }
	if (!e->callee->analyzed && !e->callee->inline_decl)
	  {
	    if (dump_file)
	      {
		indent_to (dump_file, depth);
		fprintf (dump_file,
			 "Not inlining: Function body no longer available.\n");
	      }
	    continue;
	  }
	if (!tree_can_inline_p (node->decl, e->callee->decl))
	  {
	    gimple_call_set_cannot_inline (e->call_stmt, true);
	    if (dump_file)
	      {
		indent_to (dump_file, depth);
		fprintf (dump_file,
			 "Not inlining: Target specific option mismatch.\n");
	      }
	    continue;
	  }
	if (cgraph_default_inline_p (e->callee, &failed_reason))
	  inlined |= try_inline (e, mode, depth);
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}
/* Because inlining might remove no-longer reachable nodes, we need to
   keep the array visible to garbage collector to avoid reading collected
   out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;

  if (sorrycount || errorcount)
    return 0;
  if (cgraph_decide_inlining_incrementally (node, INLINE_SIZE, 0))
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  return todo;
}
/* When inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_early_inlining;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  cgraph_gate_early_inlining,		/* gate */
  cgraph_early_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  PROP_cfg,				/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_early_inlining
	  && (flag_branch_probabilities || flag_test_coverage
	      || profile_arc_flag));
}

/* IPA pass wrapper for early inlining pass.  We need to run early inlining
   before tree profiling, so we have a stand-alone IPA pass for doing so.  */
struct simple_ipa_opt_pass pass_ipa_early_inline =
{
 {
  SIMPLE_IPA_PASS,
  "einline_ipa",			/* name */
  cgraph_gate_ipa_early_inlining,	/* gate */
  NULL,					/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  PROP_cfg,				/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_cgraph			/* todo_flags_finish */
 }
};
/* Compute parameters of functions used by inliner.  */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function.  But not at -O0
     because estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size () : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);

  /* Estimate the number of instructions for this function.
     ??? At -O0 we don't use this information except for the dumps, and
     even then only for always_inline functions.  But disabling this
     causes ICEs in the inline heuristics...  */
  inline_summary (node)->self_insns
    = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);

  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  node->global.insns = inline_summary (node)->self_insns;
  return 0;
}
/* Compute parameters of functions used by inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}

struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  NULL,					/* name */
  NULL,					/* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  PROP_cfg,				/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* This function performs intraprocedural analysis in NODE that is required
   to inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  struct cgraph_edge *cs;

  if (!flag_ipa_cp)
    {
      ipa_initialize_node_params (node);
      ipa_detect_param_modifications (node);
    }
  ipa_analyze_params_uses (node);

  if (!flag_ipa_cp)
    for (cs = node->callees; cs; cs = cs->next_callee)
      {
	ipa_count_arguments (cs);
	ipa_compute_jump_functions (cs);
      }

  if (dump_file)
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}
/* Note function body size.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  if (flag_indirect_inlining)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}
/* Called when new function is inserted to callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}
/* Note function body sizes (compute the inline parameters) for all
   functions in the callgraph.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);

  return;
}
/* Apply inline plan to function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;

  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}
struct ipa_opt_pass pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  NULL,					/* gate */
  cgraph_decide_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  PROP_cfg,				/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions		/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 NULL,					/* write_summary */
 NULL,					/* read_summary */
 NULL,					/* function_read_summary */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL,					/* variable_transform */
};

#include "gt-ipa-inline.h"