/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (production of the clones and updating of
     the overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of particular heuristics is separated from the
     rest of the code to make it easier to replace with a more complicated
     implementation in the future.  The rest of the inlining code acts as
     a library aimed to modify the callgraph and verify that the limits on
     code size growth are met.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack-style algorithm, ordering
     all functions by their "profitability" (estimated by code size
     growth) and inlining them in priority order.

     cgraph_decide_inlining implements the heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early
     inliner.
   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes the local properties of functions that are used
     by the inliner: the estimated function body size, whether the
     function is inlinable at all, and its stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied on the function body.

   pass_early_inlining

     A simple local inlining pass inlining callees into the current
     function.  This pass makes no global whole-compilation-unit analysis,
     and thus, when allowed to do inlining that expands code size, it
     might result in unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  The functions after inlining are early optimized,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures first
     inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm for
     inlining small functions that results in overall growth of the
     compilation unit, and for inlining functions called once.  The pass
     computes just the so-called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining, it
     does not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline
     on a given function.  Possibly the function body before inlining is
     saved when it is needed for further inlining later.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"
#include "except.h"

#define MAX_TIME 1000000000

/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE_NORECURSIVE,
  INLINE_SIZE,
  INLINE_ALL
};
static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
                                      int);

/* Statistics we collect about the inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_size;
static gcov_type max_count, max_benefit;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}

/* Estimate self time of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  gcov_type time = (((gcov_type)what->global.time
                     - inline_summary (what)->time_inlining_benefit)
                    * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
                   + to->global.time;
  if (time < 0)
    time = 0;
  if (time > MAX_TIME)
    time = MAX_TIME;
  return time;
}

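/* A worked illustration of the formula above (the numbers are assumed, not
   taken from any particular target), with CGRAPH_FREQ_BASE being 1000 as
   defined in cgraph.h: a callee with global.time 220 and
   time_inlining_benefit 20, inlined at a call site of frequency 500
   (executed half as often as the caller's entry), contributes
   ((220 - 20) * 500 + 500) / 1000 == 100 time units on top of TO's own
   time.  */
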
/* Estimate self size of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
              * times + to->global.size);
  gcc_assert (size >= 0);
  return size;
}

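/* For illustration (assumed numbers): a callee with global.size 30 and
   size_inlining_benefit 10 inlined once into a caller of size 50 yields an
   estimate of (30 - 10) * 1 + 50 == 70 size units.  */
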
/* E is expected to be an edge being inlined.  Clone the destination node of
   the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating new
   clones or re-using the node originally representing an out-of-line
   function call.  */
void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && !e->callee->needed
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed)
            {
              overall_size -= e->callee->global.size;
              nfunctions_inlined++;
            }
          duplicate = false;
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
                                 update_original);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
         + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}

/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;
  int freq;
  bool duplicate = false;
  int orig_size = e->callee->global.size;

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;

  if (!e->callee->global.inlined)
    DECL_POSSIBLY_INLINED (e->callee->decl) = true;
  e->callee->global.inlined = true;

  if (e->callee->callers->next_caller
      || e->callee->needed)
    duplicate = true;
  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  freq = e->frequency;
  /* Now update size of caller and all functions caller is inlined into.  */
  for (;e && !e->inline_failed; e = e->caller->callers)
    {
      to = e->caller;
      old_size = e->caller->global.size;
      new_size = cgraph_estimate_size_after_inlining (1, to, what);
      to->global.size = new_size;
      to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_size > old_size)
    overall_size += new_size - old_size;
  if (!duplicate)
    overall_size -= orig_size;
  ncalls_inlined++;

  if (flag_indirect_inlining)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}

/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.
   Return the following unredirected edge in the list of callers
   of EDGE->CALLEE.  */

static struct cgraph_edge *
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!gimple_call_cannot_inline_p (edge->call_stmt));
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true, NULL);
          if (e == edge)
            edge = next;
        }
    }

  return edge;
}

/* Estimate the growth caused by inlining NODE into all of its callers.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
        self_recursive = true;
      if (e->inline_failed)
        growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                   - e->caller->global.size);
    }

  /* ??? Wrong for non-trivially self recursive functions or cases where
     we decide to not inline for different reasons, but it is not big deal
     as in that case we will keep the body around, but we will also avoid
     some inlining.  */
  if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.size;

  node->global.estimated_growth = growth;
  return growth;
}

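/* For illustration (assumed numbers): a local function of size 20 with
   size_inlining_benefit 5 and two remaining call sites grows each caller
   by 15, i.e. +30 in total; because the offline body can then be
   eliminated, the estimated unit growth is 30 - 20 == 10.  */
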
/* Return false when inlining WHAT into TO is not a good idea because it
   would cause too large a growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going to be
   inlined; otherwise figure out how many call sites in TO call WHAT and
   verify that all of them can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining a large function body called once into a small function,
     take the inlined function as base for limiting the growth.  */
  if (inline_summary (to)->self_size > inline_summary(what)->self_size)
    limit = inline_summary (to)->self_size;
  else
    limit = inline_summary (what)->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

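/* For illustration (assumed numbers): inlining a callee of self_size 30
   into a caller of self_size 100 with PARAM_LARGE_FUNCTION_GROWTH at its
   default of 100 gives limit == 100 + 100 == 200; the inlining is
   rejected only when the estimated new size does not shrink the function,
   exceeds PARAM_LARGE_FUNCTION_INSNS and also exceeds this limit.  */
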
/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }

  if (!n->analyzed)
    {
      if (reason)
        *reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          return false;
        }
    }
  else
    {
      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          return false;
        }
    }

  return true;
}

/* Return true when inlining WHAT into TO would create recursive inlining.
   We call it recursive inlining whenever the same function appears more
   than once in a single recursion nest path in the inline graph.  */

static bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking a recursive function inline has sane semantics and thus we
     should not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}

/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed, so
   the metrics may accurately depend on values such as the number of
   inlinable callers of the function or the function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge)
{
  int badness;
  int growth =
    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);

  growth -= edge->caller->global.size;

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    badness = INT_MIN - growth;

  /* When profiling is available, base priorities on -(#calls / growth),
     so we optimize for the overall number of "executed" inlined calls.  */
  else if (max_count)
    badness = ((int)((double)edge->count * INT_MIN / max_count
                     / (max_benefit + 1))
               * (inline_summary (edge->callee)->time_inlining_benefit + 1))
              / growth;

  /* When a function-local profile is available, base priorities on
     growth / frequency, so we optimize for the overall frequency of
     inlined calls.  This is not too accurate since while the call might
     be frequent within the function, the function itself is infrequent.

     Another objective to optimize for is the number of different calls
     inlined.  We add the estimated growth after inlining all functions to
     bias the priorities slightly in this direction (so fewer-times-called
     functions of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      badness = growth * 10000;
      div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
                  / (edge->callee->global.time + 1) + 1, 100);

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 10000)
        div = 10000 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += cgraph_estimate_growth (edge->callee);
      if (badness > INT_MAX)
        badness = INT_MAX;
    }
  /* When a function-local profile is not available or it does not give
     useful information (i.e. the frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall
     number of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
    }
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}

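/* A worked illustration of the guessed-profile branch above (numbers
   assumed), with CGRAPH_FREQ_BASE being 1000: a call executed on every
   entry of its caller (frequency 1000) with growth 40 starts from badness
   400000 and div 101; when the callee's time_inlining_benefit is
   negligible, the divisor stays near 101, giving a badness of roughly
   3960 before the estimated-growth bias is added.  Edges with smaller
   badness are extracted from the heap and inlined first.  */
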
/* Recompute heap nodes for each caller edge of NODE.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable || node->local.disregard_inline_limits
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  if (!node->local.inlinable)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        int badness = cgraph_edge_badness (edge);
        if (edge->aux)
          {
            fibnode_t n = (fibnode_t) edge->aux;
            gcc_assert (n->data == edge);
            if (n->key == badness)
              continue;

            /* fibheap_replace_key only increases the keys.  */
            if (fibheap_replace_key (heap, n, badness))
              continue;
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
          }
        edge->aux = fibheap_insert (heap, badness, edge);
      }
}

/* Recompute heap nodes for the caller edges of each of NODE's callees.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e;
  node->global.estimated_growth = INT_MIN;

  for (e = node->callees; e; e = e->next_callee)
    if (e->inline_failed)
      update_caller_keys (heap, e->callee, updated_nodes);
    else if (!e->inline_failed)
      update_callee_keys (heap, e->callee, updated_nodes);
}

/* Enqueue all recursive calls from NODE into the priority queue depending
   on how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by the expected
           number of calls.  Without profile feedback we maintain a simple
           queue to order candidates by recursive depths.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

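/* The profile-feedback key above scales edge counts into roughly the
   range [-(1 << 24), 0].  For illustration (assumed numbers): with
   max_count == 1 << 30 the divisor is 64, so an edge executed 6400 times
   receives key -100; hotter recursive edges get smaller keys and are
   extracted from the heap first.  */
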
/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
                                  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need an original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     "   maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, "   Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         "   Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            {
              fprintf (dump_file, " called approx. %.2f times per call",
                       (double)curr->count / node->count);
            }
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
             master_clone->global.size, node->global.size,
             master_clone->global.time, node->global.time);

  /* Remove the master clone we used for inlining.  We rely on the fact
     that clones inlined into the master clone get queued just before the
     master clone, so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces the number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}

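/* For illustration (assumed numbers): with
   PARAM_MIN_INLINE_RECURSIVE_PROBABILITY at its default of 10, a
   recursive edge with count 50 in a function entered 1000 times yields
   50 * 100 / 1000 == 5 < 10, so that call site is skipped as unlikely to
   recurse deeply.  */
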
/* Set inline_failed for all callers of the given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
                          cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
             cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}

/* Given the whole-compilation-unit estimate of INSNS, compute how large we
   can allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}

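/* For illustration (assumed numbers): with PARAM_INLINE_UNIT_GROWTH at
   its default of 30, a unit of 50000 size units may grow to at most
   50000 * 130 / 100 == 65000; small units are first rounded up to
   PARAM_LARGE_UNIT_INSNS so that the percentage has a reasonable base to
   work with.  */
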
/* Compute the badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
    }
}

/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then the
   estimates are updated after each inlining.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers
          || node->local.disregard_inline_limits)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge), edge);
          }
    }

  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  while (overall_size <= max_size
         && (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
    {
      int old_size = overall_size;
      struct cgraph_node *where;
      int growth =
        cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
      cgraph_inline_failed_t not_good = CIF_OK;

      growth -= edge->caller->global.size;

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i size\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.size);
          fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   gimple_filename ((const_gimple) edge->call_stmt),
                   gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   cgraph_edge_badness (edge),
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
        }
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When we do not have profile info ready, we do not weight in any
         way the position of the call in the procedure itself.  This means
         that if a call of function A from function B seems profitable to
         inline, the recursive call of function A in the inline copy of A
         in B will look profitable too, and we end up inlining until
         reaching maximal function growth.  This is not a good idea, so
         prohibit the recursive inlining.

         ??? When the frequencies are taken into account we might not need
         this restriction.

         We need to be careful here: in some testcases, e.g. directives.c
         in libcpp, we can estimate a self recursive function to have
         negative growth for inlining completely.  */
      if (!edge->count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (!cgraph_maybe_hot_edge_p (edge))
        not_good = CIF_UNLIKELY_CALL;
      if (!flag_inline_functions
          && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = CIF_NOT_DECLARED_INLINED;
      if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
        not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!tree_can_inline_p (edge->caller->decl, edge->callee->decl))
        {
          gimple_call_set_cannot_inline (edge->call_stmt, true);
          edge->inline_failed = CIF_TARGET_OPTION_MISMATCH;
          if (dump_file)
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where,
                                                 flag_indirect_inlining
                                                 ? &new_indirect_edges : NULL))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (gimple_call_cannot_inline_p (edge->call_stmt)
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         cgraph_inline_failed_string (edge->inline_failed));
              continue;
            }
          callee = edge->callee;
          cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          update_callee_keys (heap, callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties such as
         the number of inlinable calls and the size of the function body.
         After inlining these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has time %i and size %i,"
                   " net change of %+i.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.time,
                   edge->caller->global.size,
                   overall_size - old_size);
        }
      if (min_size > overall_size)
        {
          min_size = overall_size;
          max_size = compute_max_insns (min_size);

          if (dump_file)
            fprintf (dump_file, "New minimal size reached: %i\n", min_size);
        }
    }
  while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
    {
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}

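/* Schematically, the function above is the classic greedy scheme (a
   sketch, not the exact control flow):

     put every candidate edge into HEAP keyed by cgraph_edge_badness;
     while (overall_size <= max_size && HEAP is non-empty)
       {
         edge = extract_min (HEAP);
         if (edge still passes the limit and profitability checks)
           {
             inline edge;
             recompute keys of edges whose badness may have changed;
           }
       }

   Only the keys touched by the last inlining are recomputed (see
   update_caller_keys and update_callee_keys), which keeps the work close
   to one heap operation per affected edge.  */
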
/* Decide on the inlining.  We do so in topological order to avoid expenses
   on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_size = 0;
  int i;
  bool redo_always_inline = true;
  int initial_size = 0;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);

  max_count = 0;
  max_benefit = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
        struct cgraph_edge *e;

        gcc_assert (inline_summary (node)->self_size == node->global.size);
        gcc_assert (node->needed || node->reachable);
        initial_size += node->global.size;
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
        if (max_benefit < inline_summary (node)->time_inlining_benefit)
          max_benefit = inline_summary (node)->time_inlining_benefit;
      }
  gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with size %i.\n",
             initial_size);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nInlining always_inline functions:\n");

  /* In the first pass mark all always_inline edges.  Do this with a
     priority so none of our later choices will make this impossible.  */
  while (redo_always_inline)
    {
      redo_always_inline = false;
      for (i = nnodes - 1; i >= 0; i--)
        {
          struct cgraph_edge *e, *next;

          node = order[i];

          /* Handle nodes to be flattened, but don't update overall unit
             size.  */
          if (lookup_attribute ("flatten",
                                DECL_ATTRIBUTES (node->decl)) != NULL)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Flattening %s\n", cgraph_node_name (node));
              cgraph_decide_inlining_incrementally (node, INLINE_ALL, 0);
            }

          if (!node->local.disregard_inline_limits)
            continue;
          if (dump_file)
            fprintf (dump_file,
                     "\nConsidering %s size:%i (always inline)\n",
                     cgraph_node_name (node), node->global.size);
          old_size = overall_size;
          for (e = node->callers; e; e = next)
            {
              next = e->next_caller;
              if (!e->inline_failed
                  || gimple_call_cannot_inline_p (e->call_stmt))
                continue;
              if (cgraph_recursive_inlining_p (e->caller, e->callee,
                                               &e->inline_failed))
                continue;
              if (!tree_can_inline_p (e->caller->decl, e->callee->decl))
                {
                  gimple_call_set_cannot_inline (e->call_stmt, true);
                  continue;
                }
              if (cgraph_mark_inline_edge (e, true, NULL))
                redo_always_inline = true;
              if (dump_file)
                fprintf (dump_file,
                         " Inlined into %s which now has size %i.\n",
                         cgraph_node_name (e->caller),
                         e->caller->global.size);
            }
          /* Inlining self recursive functions might introduce new calls to
             themselves we didn't see in the loop above.  Fill in the proper
             reason why the inlining failed.  */
          for (e = node->callers; e; e = e->next_caller)
            if (e->inline_failed)
              e->inline_failed = CIF_RECURSIVE_INLINING;
          if (dump_file)
            fprintf (dump_file,
                     " Inlined for a net change of %+i size.\n",
                     overall_size - old_size);
        }
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers
              && !node->callers->next_caller
              && !node->needed
              && node->local.inlinable
              && node->callers->inline_failed
              && node->callers->caller != node
              && node->callers->caller->global.inlined_to != node
              && !gimple_call_cannot_inline_p (node->callers->call_stmt)
              && !DECL_EXTERNAL (node->decl)
              && !DECL_COMDAT (node->decl))
            {
              old_size = overall_size;
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s size %i.\n",
                           cgraph_node_name (node), node->global.size);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.size);
                }

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              NULL, false))
                {
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i size"
                             " for a net change of %+i size.\n",
                             cgraph_node_name (node->callers->caller),
                             node->callers->caller->global.size,
                             overall_size - old_size);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Inline limit reached, not inlined.\n");
                }
            }
        }
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    free_all_ipa_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "size %i turned to %i size.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_size,
             overall_size);
  free (order);
  return 0;
}

/* Try to inline edge E from the incremental inliner.  MODE specifies the
   mode of the inliner.

   We detect cycles by storing the mode of the inliner into each
   cgraph_node the last time we visited it in the recursion.  In general
   when the mode is set, we have recursive inlining, but as a special case
   we want to try harder to inline ALWAYS_INLINE functions: consider the
   callgraph a->b->c->b, with a being flatten and b being always_inline.
   Flattening 'a' will collapse a->b->c before hitting the cycle.  To
   accommodate always_inline, we however need to inline a->b->c->b.

   So after hitting the cycle the first time, we switch into ALWAYS_INLINE
   mode and stop inlining only after hitting ALWAYS_INLINE in
   ALWAYS_INLINE mode.  */
static bool
try_inline (struct cgraph_edge *e, enum inlining_mode mode, int depth)
{
  struct cgraph_node *callee = e->callee;
  enum inlining_mode callee_mode = (enum inlining_mode) (size_t) callee->aux;
  bool always_inline = e->callee->local.disregard_inline_limits;
  bool inlined = false;

  /* We've hit a cycle?  */
  if (callee_mode)
    {
      /* It is the first time we see the function and we are not in
         ALWAYS_INLINE-only mode yet, and the function in question is
         always_inline.  */
      if (always_inline && mode != INLINE_ALWAYS_INLINE)
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Hit cycle in %s, switching to always inline only.\n",
                       cgraph_node_name (callee));
            }
          mode = INLINE_ALWAYS_INLINE;
        }
      /* Otherwise it is time to give up.  */
      else
        {
          if (dump_file)
            {
              indent_to (dump_file, depth);
              fprintf (dump_file,
                       "Not inlining %s into %s to avoid cycle.\n",
                       cgraph_node_name (callee),
                       cgraph_node_name (e->caller));
            }
          e->inline_failed = (e->callee->local.disregard_inline_limits
                              ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
          return false;
        }
    }

  callee->aux = (void *)(size_t) mode;
  if (dump_file)
    {
      indent_to (dump_file, depth);
      fprintf (dump_file, " Inlining %s into %s.\n",
               cgraph_node_name (e->callee),
               cgraph_node_name (e->caller));
    }
  if (e->inline_failed)
    {
      cgraph_mark_inline (e);

      /* In order to fully inline always_inline functions, we need to
         recurse here, since the inlined functions might not be processed
         by incremental inlining at all yet.

         Also flattening needs to be done recursively.  */

      if (mode == INLINE_ALL || always_inline)
        cgraph_decide_inlining_incrementally (e->callee, mode, depth + 1);
      inlined = true;
    }
  callee->aux = (void *)(size_t) callee_mode;
  return inlined;
}

/* Return true when N is a leaf function.  Accept cheap (pure & const)
   builtins in leaf functions.  */

static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!DECL_BUILT_IN (e->callee->decl)
        || (!TREE_READONLY (e->callee->decl)
            || DECL_PURE_P (e->callee->decl)))
      return false;
  return true;
}

/* Decide on the inlining.  We do so in topological order to avoid expenses
   on updating data structures.
   DEPTH is the depth of recursion, used only for debug output.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode,
                                      int depth)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;
  enum inlining_mode old_mode;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  old_mode = (enum inlining_mode) (size_t)node->aux;

  if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        {
          indent_to (dump_file, depth);
          fprintf (dump_file, "Flattening %s\n", cgraph_node_name (node));
        }
      mode = INLINE_ALL;
    }

  node->aux = (void *)(size_t) mode;

  /* First of all look for always inline functions.  */
  if (mode != INLINE_SIZE_NORECURSIVE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.disregard_inline_limits
            && (mode != INLINE_ALL || !e->callee->local.inlinable))
          continue;
        if (gimple_call_cannot_inline_p (e->call_stmt))
          continue;
        /* When the edge is already inlined, we just need to recurse into
           it in order to fully flatten the leaves.  */
        if (!e->inline_failed && mode == INLINE_ALL)
          {
            inlined |= try_inline (e, mode, depth);
            continue;
          }
        if (dump_file)
          {
            indent_to (dump_file, depth);
            fprintf (dump_file,
                     "Considering to always inline inline candidate %s.\n",
                     cgraph_node_name (e->callee));
          }
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (node->decl, e->callee->decl))
          {
            gimple_call_set_cannot_inline (e->call_stmt, true);
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Target specific option mismatch.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        inlined |= try_inline (e, mode, depth);
      }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
    for (e = node->callees; e; e = e->next_callee)
      {
        int allowed_growth = 0;
        if (!e->callee->local.inlinable
            || !e->inline_failed
            || e->callee->local.disregard_inline_limits)
          continue;
        if (dump_file)
          fprintf (dump_file, "Considering inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: recursive call.\n");
              }
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: SSA form does not match.\n");
              }
            continue;
          }

        if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
            && optimize_function_for_speed_p (cfun))
          allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);

        /* When the function body would grow and inlining the function
           won't eliminate the need for an offline copy of the function,
           don't inline.  */
        if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
             || (!flag_inline_functions
                 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
            && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                > e->caller->global.size + allowed_growth)
            && cgraph_estimate_growth (e->callee) > allowed_growth)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.size);
              }
            continue;
          }
        if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                         false)
            || gimple_call_cannot_inline_p (e->call_stmt))
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file, "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              }
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              }
            continue;
          }
        if (!tree_can_inline_p (node->decl, e->callee->decl))
          {
            gimple_call_set_cannot_inline (e->call_stmt, true);
            if (dump_file)
              {
                indent_to (dump_file, depth);
                fprintf (dump_file,
                         "Not inlining: Target specific option mismatch.\n");
              }
            continue;
          }
        if (cgraph_default_inline_p (e->callee, &failed_reason))
          inlined |= try_inline (e, mode, depth);
      }
  node->aux = (void *)(size_t) old_mode;
  return inlined;
}

/* Because inlining might remove no-longer reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and
   other passes to be somewhat more effective and avoids some code
   duplication in the later real inlining pass for testcases with very
   many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;
  int iterations = 0;

  if (sorrycount || errorcount)
    return 0;
  while (cgraph_decide_inlining_incrementally (node,
                                               iterations
                                               ? INLINE_SIZE_NORECURSIVE : INLINE_SIZE, 0)
         && iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS))
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      iterations++;
      timevar_pop (TV_INTEGRATION);
    }
  if (dump_file)
    fprintf (dump_file, "Iterations: %i\n", iterations);
  cfun->always_inline_functions_inlined = true;
  return todo;
}

/* When early inlining shall be performed.  */
static bool
cgraph_gate_early_inlining (void)
{
  return flag_early_inlining;
}

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  cgraph_gate_early_inlining,		/* gate */
  cgraph_early_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};

/* When IPA early inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_early_inlining
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
struct simple_ipa_opt_pass pass_ipa_early_inline =
{
 {
  SIMPLE_IPA_PASS,
  "einline_ipa",			/* name */
  cgraph_gate_ipa_early_inlining,	/* gate */
  NULL,					/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_cgraph			/* todo_flags_finish */
 }
};

/* See if a statement might disappear after inlining.  We are not terribly
   sophisticated, basically looking for simple abstraction penalty
   wrappers.  */

static bool
likely_eliminated_by_inlining_p (gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  switch (code)
    {
      case GIMPLE_RETURN:
        return true;
      case GIMPLE_ASSIGN:
        if (gimple_num_ops (stmt) != 2)
          return false;

        /* Casts of parameters, loads from parameters passed by reference
           and stores to return value or parameters are probably free after
           inlining.  */
        if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
            || gimple_assign_rhs_code (stmt) == NOP_EXPR
            || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
            || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
          {
            tree rhs = gimple_assign_rhs1 (stmt);
            tree lhs = gimple_assign_lhs (stmt);
            tree inner_rhs = rhs;
            tree inner_lhs = lhs;
            bool rhs_free = false;
            bool lhs_free = false;

            while (handled_component_p (inner_lhs)
                   || TREE_CODE (inner_lhs) == INDIRECT_REF)
              inner_lhs = TREE_OPERAND (inner_lhs, 0);
            while (handled_component_p (inner_rhs)
                   || TREE_CODE (inner_rhs) == ADDR_EXPR
                   || TREE_CODE (inner_rhs) == INDIRECT_REF)
              inner_rhs = TREE_OPERAND (inner_rhs, 0);

            if (TREE_CODE (inner_rhs) == PARM_DECL
                || (TREE_CODE (inner_rhs) == SSA_NAME
                    && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
                    && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
              rhs_free = true;
            if (rhs_free && is_gimple_reg (lhs))
              lhs_free = true;
            if (((TREE_CODE (inner_lhs) == PARM_DECL
                  || (TREE_CODE (inner_lhs) == SSA_NAME
                      && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
                      && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
                 && inner_lhs != lhs)
                || TREE_CODE (inner_lhs) == RESULT_DECL
                || (TREE_CODE (inner_lhs) == SSA_NAME
                    && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
              lhs_free = true;
            if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
              rhs_free = true;
            if (lhs_free && rhs_free)
              return true;
          }
        return false;
      default:
        return false;
    }
}

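/* For illustration, the GIMPLE body of a typical C++ accessor (names here
   are made up for the example):

     len_1 = this_2(D)->len;   load through a parameter: free
     <retval> = len_1;         store to the return value: free
     return <retval>;          GIMPLE_RETURN: free

   Every statement in it is predicted to disappear once the body is
   inlined into a caller that passes a concrete object.  */
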
/* Compute function body size parameters for NODE.  */

static void
estimate_function_body_sizes (struct cgraph_node *node)
{
  gcov_type time = 0;
  gcov_type time_inlining_benefit = 0;
  int size = 0;
  int size_inlining_benefit = 0;
  basic_block bb;
  gimple_stmt_iterator bsi;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  tree arg;
  int freq;
  tree funtype = TREE_TYPE (node->decl);
  bitmap must_not_throw = must_not_throw_labels ();

  if (dump_file)
    fprintf (dump_file, "Analyzing function body size: %s\n", cgraph_node_name (node));

  gcc_assert (my_function && my_function->cfg);
  FOR_EACH_BB_FN (bb, my_function)
    {
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          int this_size = estimate_num_insns (gsi_stmt (bsi), &eni_size_weights);
          int this_time = estimate_num_insns (gsi_stmt (bsi), &eni_time_weights);

          /* MUST_NOT_THROW is usually handled by the runtime calling
             terminate and stopping stack unwinding.  However when there is
             a local cleanup that can resume to MUST_NOT_THROW, we generate
             an explicit handler containing a std::terminate () call.

             Because inlining of a function can introduce new cleanup
             regions, prior to inlining we keep the std::terminate () calls
             for every function call contained in a MUST_NOT_THROW region.
             The vast majority of these will be eliminated after inlining,
             and crossjumping will unify possible duplicated calls.  So
             ignore the handlers for function body estimates.  */
          if (gimple_code (gsi_stmt (bsi)) == GIMPLE_LABEL
              && bitmap_bit_p (must_not_throw,
                               LABEL_DECL_UID (gimple_label_label (gsi_stmt (bsi)))))
            {
              if (dump_file)
                fprintf (dump_file, "  MUST_NOT_THROW landing pad.  Ignoring whole BB.\n");
              break;
            }
          if (dump_file)
            {
              fprintf (dump_file, "  freq:%6i size:%3i time:%3i ", freq, this_size, this_time);
              print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
            }
          this_time *= freq;
          time += this_time;
          size += this_size;

          if (likely_eliminated_by_inlining_p (gsi_stmt (bsi)))
            {
              size_inlining_benefit += this_size;
              time_inlining_benefit += this_time;
              if (dump_file)
                fprintf (dump_file, "    Likely eliminated\n");
            }
          gcc_assert (time >= 0);
          gcc_assert (size >= 0);
        }
    }
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
                           / CGRAPH_FREQ_BASE);
  if (dump_file)
    fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
             (int)time, (int)time_inlining_benefit,
             size, size_inlining_benefit);
  time_inlining_benefit += eni_time_weights.call_cost;
  size_inlining_benefit += eni_size_weights.call_cost;
  if (!VOID_TYPE_P (TREE_TYPE (funtype)))
    {
      int cost = estimate_move_cost (TREE_TYPE (funtype));
      time_inlining_benefit += cost;
      size_inlining_benefit += cost;
    }
  for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
    if (!VOID_TYPE_P (TREE_TYPE (arg)))
      {
        int cost = estimate_move_cost (TREE_TYPE (arg));
        time_inlining_benefit += cost;
        size_inlining_benefit += cost;
      }
  if (time_inlining_benefit > MAX_TIME)
    time_inlining_benefit = MAX_TIME;
  if (time > MAX_TIME)
    time = MAX_TIME;
  inline_summary (node)->self_time = time;
  inline_summary (node)->self_size = size;
  if (dump_file)
    fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
             (int)time, (int)time_inlining_benefit,
             size, size_inlining_benefit);
  inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
  inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
  BITMAP_FREE (must_not_throw);
}

/* Compute parameters of functions used by the inliner.  */
static unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function, but not at -O0, because
     estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size () : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (current_function_decl);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
  estimate_function_body_sizes (node);
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.time = inline_summary (node)->self_time;
  node->global.size = inline_summary (node)->self_size;
  return 0;
}

/* Compute parameters of functions used by the inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}

struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  "inline_param",			/* name */
  NULL,					/* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* This function performs the intraprocedural analysis in NODE that is
   required to inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  struct cgraph_edge *cs;

  if (!flag_ipa_cp)
    {
      ipa_initialize_node_params (node);
      ipa_detect_param_modifications (node);
    }
  ipa_analyze_params_uses (node);

  if (!flag_ipa_cp)
    for (cs = node->callees; cs; cs = cs->next_callee)
      {
        ipa_count_arguments (cs);
        ipa_compute_jump_functions (cs);
      }

  if (dump_file)
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}

/* Note function body size.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  if (flag_indirect_inlining)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}

/* Called when a new function is inserted into the callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}

/* Compute the inline parameters for every analyzed function in the
   compilation unit.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
    cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);

  return;
}

/* Apply the inline plan to the function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    if (!e->inline_failed || warn_inline)
      break;

  if (e)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}

struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  NULL,					/* gate */
  cgraph_decide_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions		/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 NULL,					/* write_summary */
 NULL,					/* read_summary */
 NULL,					/* function_read_summary */
 0,					/* function_transform_todo_flags_start */
 inline_transform,			/* function_transform */
 NULL					/* variable_transform */
};

#include "gt-ipa-inline.h"