gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24 We separate inlining decisions from the inliner itself and store them
25 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
26 documentation for the particular representation of inline plans in the
27 callgraph.
29 There are three major parts of this file:
31 cgraph_mark_inline implementation
33 This function marks a given call inline and performs the necessary
34 modifications of the cgraph (production of the clones and updating of
35 overall statistics).
37 inlining heuristics limits
39 These functions check that a particular inlining is allowed
40 by the limits specified by the user (allowed function growth, overall unit
41 growth and so on).
43 inlining heuristics
45 This is the implementation of the IPA pass aiming to get as much benefit
46 from inlining as possible while obeying the limits checked above.
48 The implementation of particular heuristics is separated from
49 the rest of code to make it easier to replace it with more complicated
50 implementation in the future. The rest of inlining code acts as a
51 library aimed to modify the callgraph and verify that the parameters
52 on code size growth fit.
54 To mark a given call inline, use the cgraph_mark_inline function; the
55 verification is performed by cgraph_default_inline_p and
56 cgraph_check_inline_limits.
58 The heuristics implement a simple knapsack-style algorithm, ordering
59 all functions by their "profitability" (estimated by code size growth)
60 and inlining them in priority order.
62 cgraph_decide_inlining implements heuristics taking whole callgraph
63 into account, while cgraph_decide_inlining_incrementally considers
64 only one function at a time and is used by early inliner.
66 The inliner itself is split into several passes:
68 pass_inline_parameters
70 This pass computes local properties of functions that are used by the inliner:
71 estimated function body size, whether the function is inlinable at all, and
72 stack frame consumption.
74 Before executing any of the inliner passes, this local pass has to be applied
75 to each function in the callgraph (i.e. run as a subpass of some earlier
76 IPA pass). The results are made out of date by any optimization applied
77 to the function body.
79 pass_early_inlining
81 A simple local inlining pass that inlines callees into the current function.
82 This pass makes no global whole-compilation-unit analysis, and thus, when
83 allowed to do inlining that expands code size, it might result in unbounded
84 growth of the whole unit.
86 The pass is run during conversion into SSA form. Only functions already
87 converted into SSA form are inlined, so the conversion must happen in
88 topological order on the callgraph (that is maintained by pass manager).
89 The functions after inlining are early optimized, so the early inliner sees
90 the unoptimized function itself, but all considered callees are already
91 optimized, allowing it to unfold the abstraction penalty of C++ effectively
92 and cheaply.
94 pass_ipa_early_inlining
96 With profiling, the early inlining is also necessary to reduce
97 instrumentation costs on programs with a high abstraction penalty (doing
98 many redundant calls). This can't happen in parallel with early
99 optimization and profile instrumentation, because we would end up
100 re-instrumenting already instrumented function bodies we brought in via
101 inlining.
103 To avoid this, this pass is executed as an IPA pass before profiling. It is a
104 simple wrapper around pass_early_inlining, ensuring this inlining runs first.
106 pass_ipa_inline
108 This is the main pass, implementing a simple greedy algorithm that does
109 inlining of small functions (which results in overall growth of the compilation
110 unit) and inlining of functions called once. The pass computes just the
111 so-called inline plan (a representation of the inlining to be done in the
112 callgraph) and, unlike early inlining, does not perform the inlining itself.
114 pass_apply_inline
116 This pass performs the actual inlining decided by pass_ipa_inline on a given
117 function. Possibly the function body before inlining is saved when it is
118 needed for further inlining later.
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "tm.h"
125 #include "tree.h"
126 #include "tree-inline.h"
127 #include "langhooks.h"
128 #include "flags.h"
129 #include "cgraph.h"
130 #include "diagnostic.h"
131 #include "gimple-pretty-print.h"
132 #include "timevar.h"
133 #include "params.h"
134 #include "fibheap.h"
135 #include "intl.h"
136 #include "tree-pass.h"
137 #include "hashtab.h"
138 #include "coverage.h"
139 #include "ggc.h"
140 #include "tree-flow.h"
141 #include "rtl.h"
142 #include "ipa-prop.h"
143 #include "except.h"
145 #define MAX_TIME 1000000000
147 /* Mode the incremental inliner operates on:
149 In ALWAYS_INLINE mode only functions marked
150 always_inline are inlined. This mode is used after detecting a cycle during
151 flattening.
153 In SIZE mode, only functions that reduce function body size after inlining
154 are inlined; this is used during early inlining.
156 In ALL mode, everything is inlined. This is used during flattening. */
157 enum inlining_mode {
158 INLINE_NONE = 0,
159 INLINE_ALWAYS_INLINE,
160 INLINE_SIZE_NORECURSIVE,
161 INLINE_SIZE,
162 INLINE_ALL
165 static bool
166 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
167 static void cgraph_flatten (struct cgraph_node *node);
170 /* Statistics we collect about inlining algorithm. */
171 static int ncalls_inlined;
172 static int nfunctions_inlined;
173 static int overall_size;
174 static gcov_type max_count, max_benefit;
176 /* Holders of ipa cgraph hooks: */
177 static struct cgraph_node_hook_list *function_insertion_hook_holder;
179 static inline struct inline_summary *
180 inline_summary (struct cgraph_node *node)
182 return &node->local.inline_summary;
185 /* Estimate self time of the function after inlining WHAT into TO. */
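/* Illustrative example (hypothetical numbers, not taken from any real
   compilation): with what->global.time == 500, a time_inlining_benefit of 100,
   a call frequency of CGRAPH_FREQ_BASE / 2 and to->global.time == 300, the
   callee contributes (500 - 100) / 2 == 200 units, so the estimate below
   yields 500.  The result is clamped to the range [0, MAX_TIME].  */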
187 static int
188 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
189 struct cgraph_node *what)
191 gcov_type time = (((gcov_type)what->global.time
192 - inline_summary (what)->time_inlining_benefit)
193 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
194 + to->global.time;
195 if (time < 0)
196 time = 0;
197 if (time > MAX_TIME)
198 time = MAX_TIME;
199 return time;
202 /* Estimate self size of the function after inlining WHAT into TO. */
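/* Illustrative example (hypothetical numbers): with what->global.size == 40,
   a size_inlining_benefit of 10, times == 2 and to->global.size == 100, the
   estimate below is (40 - 10) * 2 + 100 == 160.  */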
204 static inline int
205 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
206 struct cgraph_node *what)
208 int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
209 * times + to->global.size);
210 gcc_assert (size >= 0);
211 return size;
214 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
215 by NEST. */
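/* For example (hypothetical scale factor): freq_scale == CGRAPH_FREQ_BASE / 4
   scales every callee edge to a quarter of its former frequency, clamped to
   CGRAPH_FREQ_MAX; a freq_scale of 0 is treated as 1 so that a high loop nest
   is not ignored once the frequency drops to zero.  */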
217 static void
218 update_noncloned_frequencies (struct cgraph_node *node,
219 int freq_scale, int nest)
221 struct cgraph_edge *e;
223 /* We do not want to ignore high loop nest after freq drops to 0. */
224 if (!freq_scale)
225 freq_scale = 1;
226 for (e = node->callees; e; e = e->next_callee)
228 e->loop_nest += nest;
229 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
230 if (e->frequency > CGRAPH_FREQ_MAX)
231 e->frequency = CGRAPH_FREQ_MAX;
232 if (!e->inline_failed)
233 update_noncloned_frequencies (e->callee, freq_scale, nest);
237 /* E is expected to be an edge being inlined. Clone destination node of
238 the edge and redirect it to the new clone.
239 DUPLICATE is used for bookkeeping on whether we are actually creating new
240 clones or re-using the node originally representing the out-of-line call. */
242 void
243 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
244 bool update_original)
246 HOST_WIDE_INT peak;
248 if (duplicate)
250 /* We may eliminate the need for out-of-line copy to be output.
251 In that case just go ahead and re-use it. */
252 if (!e->callee->callers->next_caller
253 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
254 /* Don't reuse if more than one function shares a comdat group.
255 If the other function(s) are needed, we need to emit even
256 this function out of line. */
257 && !e->callee->same_comdat_group
258 && !cgraph_new_nodes)
260 gcc_assert (!e->callee->global.inlined_to);
261 if (e->callee->analyzed)
263 overall_size -= e->callee->global.size;
264 nfunctions_inlined++;
266 duplicate = false;
267 e->callee->local.externally_visible = false;
268 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
270 else
272 struct cgraph_node *n;
273 n = cgraph_clone_node (e->callee, e->callee->decl,
274 e->count, e->frequency, e->loop_nest,
275 update_original, NULL);
276 cgraph_redirect_edge_callee (e, n);
280 if (e->caller->global.inlined_to)
281 e->callee->global.inlined_to = e->caller->global.inlined_to;
282 else
283 e->callee->global.inlined_to = e->caller;
284 e->callee->global.stack_frame_offset
285 = e->caller->global.stack_frame_offset
286 + inline_summary (e->caller)->estimated_self_stack_size;
287 peak = e->callee->global.stack_frame_offset
288 + inline_summary (e->callee)->estimated_self_stack_size;
289 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
290 e->callee->global.inlined_to->global.estimated_stack_size = peak;
291 cgraph_propagate_frequency (e->callee);
293 /* Recursively clone all bodies. */
294 for (e = e->callee->callees; e; e = e->next_callee)
295 if (!e->inline_failed)
296 cgraph_clone_inlined_nodes (e, duplicate, update_original);
299 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
300 specifies whether the profile of the original function should be updated. If any new
301 indirect edges are discovered in the process, add them to NEW_EDGES, unless
302 it is NULL. Return true iff any new callgraph edges were discovered as a
303 result of inlining. */
305 static bool
306 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
307 VEC (cgraph_edge_p, heap) **new_edges)
309 int old_size = 0, new_size = 0;
310 struct cgraph_node *to = NULL, *what;
311 struct cgraph_edge *curr = e;
312 int freq;
314 gcc_assert (e->inline_failed);
315 e->inline_failed = CIF_OK;
316 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
318 cgraph_clone_inlined_nodes (e, true, update_original);
320 what = e->callee;
322 freq = e->frequency;
323 /* Now update size of caller and all functions caller is inlined into. */
324 for (;e && !e->inline_failed; e = e->caller->callers)
326 to = e->caller;
327 old_size = e->caller->global.size;
328 new_size = cgraph_estimate_size_after_inlining (1, to, what);
329 to->global.size = new_size;
330 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
332 gcc_assert (what->global.inlined_to == to);
333 if (new_size > old_size)
334 overall_size += new_size - old_size;
335 ncalls_inlined++;
337 if (flag_indirect_inlining)
338 return ipa_propagate_indirect_call_infos (curr, new_edges);
339 else
340 return false;
343 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER. */
345 static void
346 cgraph_mark_inline (struct cgraph_edge *edge)
348 struct cgraph_node *to = edge->caller;
349 struct cgraph_node *what = edge->callee;
350 struct cgraph_edge *e, *next;
352 gcc_assert (!edge->call_stmt_cannot_inline_p);
353 /* Look for all calls, mark them inline and clone recursively
354 all inlined functions. */
355 for (e = what->callers; e; e = next)
357 next = e->next_caller;
358 if (e->caller == to && e->inline_failed)
360 cgraph_mark_inline_edge (e, true, NULL);
361 if (e == edge)
362 edge = next;
367 /* Estimate the growth caused by inlining NODE into all callees. */
369 static int
370 cgraph_estimate_growth (struct cgraph_node *node)
372 int growth = 0;
373 struct cgraph_edge *e;
374 bool self_recursive = false;
376 if (node->global.estimated_growth != INT_MIN)
377 return node->global.estimated_growth;
379 for (e = node->callers; e; e = e->next_caller)
381 if (e->caller == node)
382 self_recursive = true;
383 if (e->inline_failed)
384 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
385 - e->caller->global.size);
388 /* ??? Wrong for non-trivially self recursive functions or cases where
389 we decide not to inline for different reasons, but it is not a big deal
390 as in that case we will keep the body around, but we will also avoid
391 some inlining. */
392 if (cgraph_only_called_directly_p (node)
393 && !DECL_EXTERNAL (node->decl) && !self_recursive)
394 growth -= node->global.size;
396 node->global.estimated_growth = growth;
397 return growth;
400 /* Return false when inlining WHAT into TO is not a good idea
401 as it would cause too large a growth of function bodies.
402 When ONE_ONLY is true, assume that only one call site is going
403 to be inlined; otherwise figure out how many call sites in
404 TO call WHAT and verify that all of them can be inlined. */
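/* Illustrative example (hypothetical sizes; assumes the documented default
   --param large-function-growth of 100%): inlining a callee of self_size 500
   into a caller of self_size 2000 uses the larger body as the base, so
   limit == 2000 + 2000 * 100 / 100 == 4000 instructions.  */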
407 static bool
408 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
409 cgraph_inline_failed_t *reason, bool one_only)
411 int times = 0;
412 struct cgraph_edge *e;
413 int newsize;
414 int limit;
415 HOST_WIDE_INT stack_size_limit, inlined_stack;
417 if (one_only)
418 times = 1;
419 else
420 for (e = to->callees; e; e = e->next_callee)
421 if (e->callee == what)
422 times++;
424 if (to->global.inlined_to)
425 to = to->global.inlined_to;
427 /* When inlining a large function body called once into a small function,
428 take the inlined function as the base for limiting the growth. */
429 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
430 limit = inline_summary (to)->self_size;
431 else
432 limit = inline_summary (what)->self_size;
434 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
436 /* Check the size after inlining against the function limits. But allow
437 the function to shrink if it went over the limits by forced inlining. */
438 newsize = cgraph_estimate_size_after_inlining (times, to, what);
439 if (newsize >= to->global.size
440 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
441 && newsize > limit)
443 if (reason)
444 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
445 return false;
448 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
450 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
452 inlined_stack = (to->global.stack_frame_offset
453 + inline_summary (to)->estimated_self_stack_size
454 + what->global.estimated_stack_size);
455 if (inlined_stack > stack_size_limit
456 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
458 if (reason)
459 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
460 return false;
462 return true;
465 /* Return true when function N is small enough to be inlined. */
467 static bool
468 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
470 tree decl = n->decl;
472 if (n->local.disregard_inline_limits)
473 return true;
475 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
477 if (reason)
478 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
479 return false;
482 if (!n->analyzed)
484 if (reason)
485 *reason = CIF_BODY_NOT_AVAILABLE;
486 return false;
489 if (DECL_DECLARED_INLINE_P (decl))
491 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
493 if (reason)
494 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
495 return false;
498 else
500 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
502 if (reason)
503 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
504 return false;
508 return true;
511 /* Return true when inlining WHAT would create recursive inlining.
512 We call recursive inlining all cases where the same function appears more than
513 once on a single recursion nest path in the inline graph. */
515 static inline bool
516 cgraph_recursive_inlining_p (struct cgraph_node *to,
517 struct cgraph_node *what,
518 cgraph_inline_failed_t *reason)
520 bool recursive;
521 if (to->global.inlined_to)
522 recursive = what->decl == to->global.inlined_to->decl;
523 else
524 recursive = what->decl == to->decl;
525 /* Marking a recursive function inline has sane semantics and thus we should
526 not warn about it. */
527 if (recursive && reason)
528 *reason = (what->local.disregard_inline_limits
529 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
530 return recursive;
533 /* A cost model driving the inlining heuristics such that the edges with the
534 smallest badness are inlined first. After each inlining is performed,
535 the costs of all caller edges of the affected nodes are recomputed so the
536 metrics may accurately depend on values such as the number of inlinable callers
537 of the function or the function body size. */
539 static int
540 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
542 gcov_type badness;
543 int growth =
544 (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
545 - edge->caller->global.size);
547 if (edge->callee->local.disregard_inline_limits)
548 return INT_MIN;
550 if (dump)
552 fprintf (dump_file, " Badness calculation for %s -> %s\n",
553 cgraph_node_name (edge->caller),
554 cgraph_node_name (edge->callee));
555 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
556 growth,
557 edge->callee->global.time,
558 inline_summary (edge->callee)->time_inlining_benefit,
559 edge->callee->global.size,
560 inline_summary (edge->callee)->size_inlining_benefit);
563 /* Always prefer inlining saving code size. */
564 if (growth <= 0)
566 badness = INT_MIN - growth;
567 if (dump)
568 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
569 growth);
572 /* When profiling is available, base priorities on -(#calls / growth),
573 so we optimize for the overall number of "executed" inlined calls. */
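/* Illustrative reading of the formula below: an edge whose count equals
   max_count and whose callee's time_inlining_benefit equals max_benefit maps
   to roughly INT_MIN / growth, i.e. the hottest, most beneficial calls get
   the most negative (best) badness.  */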
574 else if (max_count)
576 badness =
577 ((int)
578 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
579 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
580 if (dump)
582 fprintf (dump_file,
583 " %i (relative %f): profile info. Relative count %f"
584 " * Relative benefit %f\n",
585 (int) badness, (double) badness / INT_MIN,
586 (double) edge->count / max_count,
587 (double) (inline_summary (edge->callee)->
588 time_inlining_benefit + 1) / (max_benefit + 1));
592 /* When a function-local profile is available, base priorities on
593 growth / frequency, so we optimize for the overall frequency of inlined
594 calls. This is not too accurate, since while the call might be frequent
595 within the function, the function itself may be infrequent.
597 Another objective to optimize for is the number of different calls inlined.
598 We add the estimated growth after inlining all functions to bias the
599 priorities slightly in this direction (so functions of the same size that
600 are called fewer times get priority). */
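/* Worked example with hypothetical numbers: growth == 50, an always-executed
   call (frequency == CGRAPH_FREQ_BASE, so div starts at 101), a callee whose
   time_inlining_benefit is 20 out of a global time of 99 (benefitperc == 21,
   div == 2121) and an overall estimated growth of 200 give
   50 * 10000 / 2121 + 200 == 435.  */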
601 else if (flag_guess_branch_prob)
603 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
604 int benefitperc;
605 int growth_for_all;
606 badness = growth * 10000;
607 benefitperc =
608 MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
609 (edge->callee->global.time + 1) +1, 100);
610 div *= benefitperc;
613 /* Decrease badness if call is nested. */
614 /* Compress the range so we don't overflow. */
615 if (div > 10000)
616 div = 10000 + ceil_log2 (div) - 8;
617 if (div < 1)
618 div = 1;
619 if (badness > 0)
620 badness /= div;
621 growth_for_all = cgraph_estimate_growth (edge->callee);
622 badness += growth_for_all;
623 if (badness > INT_MAX)
624 badness = INT_MAX;
625 if (dump)
627 fprintf (dump_file,
628 " %i: guessed profile. frequency %i, overall growth %i,"
629 " benefit %i%%, divisor %i\n",
630 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
633 /* When a function-local profile is not available or does not give
634 useful information (i.e. the frequency is zero), base the cost on
635 loop nest and overall size growth, so we optimize for the overall number
636 of functions fully inlined in the program. */
637 else
639 int nest = MIN (edge->loop_nest, 8);
640 badness = cgraph_estimate_growth (edge->callee) * 256;
642 /* Decrease badness if call is nested. */
643 if (badness > 0)
644 badness >>= nest;
645 else
647 badness <<= nest;
649 if (dump)
650 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
651 nest);
654 /* Ensure that we did not overflow in all the fixed point math above. */
655 gcc_assert (badness >= INT_MIN);
656 gcc_assert (badness <= INT_MAX - 1);
657 /* Make recursive inlining happen always after other inlining is done. */
658 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
659 return badness + 1;
660 else
661 return badness;
664 /* Recompute heap nodes for each caller edge of NODE. */
666 static void
667 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
668 bitmap updated_nodes)
670 struct cgraph_edge *edge;
671 cgraph_inline_failed_t failed_reason;
673 if (!node->local.inlinable
674 || node->global.inlined_to)
675 return;
676 if (bitmap_bit_p (updated_nodes, node->uid))
677 return;
678 bitmap_set_bit (updated_nodes, node->uid);
679 node->global.estimated_growth = INT_MIN;
681 if (!node->local.inlinable)
682 return;
683 /* See if there is something to do. */
684 for (edge = node->callers; edge; edge = edge->next_caller)
685 if (edge->inline_failed)
686 break;
687 if (!edge)
688 return;
689 /* Prune out edges we won't inline into anymore. */
690 if (!cgraph_default_inline_p (node, &failed_reason))
692 for (; edge; edge = edge->next_caller)
693 if (edge->aux)
695 fibheap_delete_node (heap, (fibnode_t) edge->aux);
696 edge->aux = NULL;
697 if (edge->inline_failed)
698 edge->inline_failed = failed_reason;
700 return;
703 for (; edge; edge = edge->next_caller)
704 if (edge->inline_failed)
706 int badness = cgraph_edge_badness (edge, false);
707 if (edge->aux)
709 fibnode_t n = (fibnode_t) edge->aux;
710 gcc_assert (n->data == edge);
711 if (n->key == badness)
712 continue;
714 /* fibheap_replace_key only decreases the keys.
715 When we increase the key we do not update the heap
716 and instead re-insert the element once it becomes
717 a minimum of the heap. */
718 if (badness < n->key)
720 fibheap_replace_key (heap, n, badness);
721 gcc_assert (n->key == badness);
722 continue;
725 else
726 edge->aux = fibheap_insert (heap, badness, edge);
730 /* Recompute heap nodes for the caller edges of each of the callees.
731 Walk recursively into all inline clones. */
733 static void
734 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
735 bitmap updated_nodes)
737 struct cgraph_edge *e = node->callees;
738 node->global.estimated_growth = INT_MIN;
740 if (!e)
741 return;
742 while (true)
743 if (!e->inline_failed && e->callee->callees)
744 e = e->callee->callees;
745 else
747 if (e->inline_failed)
748 update_caller_keys (heap, e->callee, updated_nodes);
749 if (e->next_callee)
750 e = e->next_callee;
751 else
755 if (e->caller == node)
756 return;
757 e = e->caller->callers;
759 while (!e->next_callee);
760 e = e->next_callee;
765 /* Enqueue all recursive calls from NODE into priority queue depending on
766 how likely we want to recursively inline the call. */
768 static void
769 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
770 fibheap_t heap)
772 static int priority;
773 struct cgraph_edge *e;
774 for (e = where->callees; e; e = e->next_callee)
775 if (e->callee == node)
777 /* When profile feedback is available, prioritize by the expected number
778 of calls. Without profile feedback we maintain a simple queue
779 to order candidates by recursion depth. */
780 fibheap_insert (heap,
781 !max_count ? priority++
782 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
785 for (e = where->callees; e; e = e->next_callee)
786 if (!e->inline_failed)
787 lookup_recursive_calls (node, e->callee, heap);
790 /* Decide on recursive inlining: in case the function has recursive calls,
791 inline until the body size reaches the given limit. If any new indirect edges
792 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
793 is NULL. */
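/* A sketch of the decision, assuming the documented parameter defaults:
   recursive edges are only considered while the estimated body stays below
   max-inline-insns-recursive(-auto), the recursion depth does not exceed
   max-inline-recursive-depth(-auto), and, with profile feedback, an edge must
   be hot and executed at least min-inline-recursive-probability percent
   (10 by default) as often as the function entry.  */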
795 static bool
796 cgraph_decide_recursive_inlining (struct cgraph_node *node,
797 VEC (cgraph_edge_p, heap) **new_edges)
799 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
800 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
801 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
802 fibheap_t heap;
803 struct cgraph_edge *e;
804 struct cgraph_node *master_clone, *next;
805 int depth = 0;
806 int n = 0;
808 /* It does not make sense to recursively inline always-inline functions
809 as we are going to sorry() on the remaining calls anyway. */
810 if (node->local.disregard_inline_limits
811 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
812 return false;
814 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
815 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
816 return false;
818 if (DECL_DECLARED_INLINE_P (node->decl))
820 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
821 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
824 /* Make sure that function is small enough to be considered for inlining. */
825 if (!max_depth
826 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
827 return false;
828 heap = fibheap_new ();
829 lookup_recursive_calls (node, node, heap);
830 if (fibheap_empty (heap))
832 fibheap_delete (heap);
833 return false;
836 if (dump_file)
837 fprintf (dump_file,
838 " Performing recursive inlining on %s\n",
839 cgraph_node_name (node));
841 /* We need original clone to copy around. */
842 master_clone = cgraph_clone_node (node, node->decl,
843 node->count, CGRAPH_FREQ_BASE, 1,
844 false, NULL);
845 master_clone->needed = true;
846 for (e = master_clone->callees; e; e = e->next_callee)
847 if (!e->inline_failed)
848 cgraph_clone_inlined_nodes (e, true, false);
850 /* Do the inlining and update the list of recursive calls during the process. */
851 while (!fibheap_empty (heap)
852 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
853 <= limit))
855 struct cgraph_edge *curr
856 = (struct cgraph_edge *) fibheap_extract_min (heap);
857 struct cgraph_node *cnode;
859 depth = 1;
860 for (cnode = curr->caller;
861 cnode->global.inlined_to; cnode = cnode->callers->caller)
862 if (node->decl == curr->callee->decl)
863 depth++;
864 if (depth > max_depth)
866 if (dump_file)
867 fprintf (dump_file,
868 " maximal depth reached\n");
869 continue;
872 if (max_count)
874 if (!cgraph_maybe_hot_edge_p (curr))
876 if (dump_file)
877 fprintf (dump_file, " Not inlining cold call\n");
878 continue;
880 if (curr->count * 100 / node->count < probability)
882 if (dump_file)
883 fprintf (dump_file,
884 " Probability of edge is too small\n");
885 continue;
889 if (dump_file)
891 fprintf (dump_file,
892 " Inlining call of depth %i", depth);
893 if (node->count)
895 fprintf (dump_file, " called approx. %.2f times per call",
896 (double)curr->count / node->count);
898 fprintf (dump_file, "\n");
900 cgraph_redirect_edge_callee (curr, master_clone);
901 cgraph_mark_inline_edge (curr, false, new_edges);
902 lookup_recursive_calls (node, curr->callee, heap);
903 n++;
905 if (!fibheap_empty (heap) && dump_file)
906 fprintf (dump_file, " Recursive inlining growth limit met.\n");
908 fibheap_delete (heap);
909 if (dump_file)
910 fprintf (dump_file,
911 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
912 master_clone->global.size, node->global.size,
913 master_clone->global.time, node->global.time);
915 /* Remove the master clone we used for inlining. We rely on the fact that
916 clones inlined into the master clone get queued just before the master
917 clone, so we don't need recursion. */
918 for (node = cgraph_nodes; node != master_clone;
919 node = next)
921 next = node->next;
922 if (node->global.inlined_to == master_clone)
923 cgraph_remove_node (node);
925 cgraph_remove_node (master_clone);
926 /* FIXME: Recursive inlining actually reduces number of calls of the
927 function. At this place we should probably walk the function and
928 inline clones and compensate the counts accordingly. This probably
929 doesn't matter much in practice. */
930 return n > 0;
933 /* Set inline_failed for all callers of given function to REASON. */
935 static void
936 cgraph_set_inline_failed (struct cgraph_node *node,
937 cgraph_inline_failed_t reason)
939 struct cgraph_edge *e;
941 if (dump_file)
942 fprintf (dump_file, "Inlining failed: %s\n",
943 cgraph_inline_failed_string (reason));
944 for (e = node->callers; e; e = e->next_caller)
945 if (e->inline_failed)
946 e->inline_failed = reason;
949 /* Given the whole-compilation-unit size estimate INSNS, compute how large we
950 can allow the unit to grow. */
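/* Illustrative example (assuming an inline-unit-growth of 30%): for a unit
   estimated at 10000 instructions the cap below is 10000 * 130 / 100 == 13000;
   units smaller than large-unit-insns are first rounded up to that parameter
   so tiny units still get some headroom.  */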
951 static int
952 compute_max_insns (int insns)
954 int max_insns = insns;
955 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
956 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
958 return ((HOST_WIDEST_INT) max_insns
959 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
962 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
963 static void
964 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
966 while (VEC_length (cgraph_edge_p, new_edges) > 0)
968 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
970 gcc_assert (!edge->aux);
971 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
976 /* We use a greedy algorithm for inlining of small functions:
977 all inline candidates are put into a prioritized heap based on the estimated
978 growth of the overall number of instructions, and then we update the estimates.
980 INLINED and INLINED_CALLEES are just pointers to arrays large enough
981 to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
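/* In outline, the loop below repeatedly pops the edge with the lowest badness
   while overall_size stays within compute_max_insns, re-inserts the edge if
   its cached key became stale, checks the per-function and stack limits,
   marks the edge inline (or performs recursive inlining), and then updates
   the keys of the affected callers and callees.  */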
983 static void
984 cgraph_decide_inlining_of_small_functions (void)
986 struct cgraph_node *node;
987 struct cgraph_edge *edge;
988 cgraph_inline_failed_t failed_reason;
989 fibheap_t heap = fibheap_new ();
990 bitmap updated_nodes = BITMAP_ALLOC (NULL);
991 int min_size, max_size;
992 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
994 if (flag_indirect_inlining)
995 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
997 if (dump_file)
998 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1000 /* Put all inline candidates into the heap. */
1002 for (node = cgraph_nodes; node; node = node->next)
1004 if (!node->local.inlinable || !node->callers)
1005 continue;
1006 if (dump_file)
1007 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1009 node->global.estimated_growth = INT_MIN;
1010 if (!cgraph_default_inline_p (node, &failed_reason))
1012 cgraph_set_inline_failed (node, failed_reason);
1013 continue;
1016 for (edge = node->callers; edge; edge = edge->next_caller)
1017 if (edge->inline_failed)
1019 gcc_assert (!edge->aux);
1020 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1024 max_size = compute_max_insns (overall_size);
1025 min_size = overall_size;
1027 while (overall_size <= max_size
1028 && !fibheap_empty (heap))
1030 int old_size = overall_size;
1031 struct cgraph_node *where, *callee;
1032 int badness = fibheap_min_key (heap);
1033 int current_badness;
1034 int growth;
1035 cgraph_inline_failed_t not_good = CIF_OK;
1037 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1038 gcc_assert (edge->aux);
1039 edge->aux = NULL;
1040 if (!edge->inline_failed)
1041 continue;
1043 /* When updating the edge costs, we only decrease badness in the keys.
1044 When the badness increases, we keep the heap as it is and re-insert
1045 the key now. */
1046 current_badness = cgraph_edge_badness (edge, false);
1047 gcc_assert (current_badness >= badness);
1048 if (current_badness != badness)
1050 edge->aux = fibheap_insert (heap, current_badness, edge);
1051 continue;
1054 callee = edge->callee;
1056 growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
1057 - edge->caller->global.size);
1059 if (dump_file)
1061 fprintf (dump_file,
1062 "\nConsidering %s with %i size\n",
1063 cgraph_node_name (edge->callee),
1064 edge->callee->global.size);
1065 fprintf (dump_file,
1066 " to be inlined into %s in %s:%i\n"
1067 " Estimated growth after inlined into all callees is %+i insns.\n"
1068 " Estimated badness is %i, frequency %.2f.\n",
1069 cgraph_node_name (edge->caller),
1070 flag_wpa ? "unknown"
1071 : gimple_filename ((const_gimple) edge->call_stmt),
1072 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1073 cgraph_estimate_growth (edge->callee),
1074 badness,
1075 edge->frequency / (double)CGRAPH_FREQ_BASE);
1076 if (edge->count)
1077 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1078 if (dump_flags & TDF_DETAILS)
1079 cgraph_edge_badness (edge, true);
1082 /* When we do not have profile info ready, we don't weight in any way the
1083 position of the call in the procedure itself. This means that if a call of
1084 function A from function B seems profitable to inline, the recursive
1085 call of function A in the inline copy of A in B will look profitable too,
1086 and we end up inlining until reaching the maximal function growth. This
1087 is not a good idea, so prohibit the recursive inlining.
1089 ??? When the frequencies are taken into account we might not need this
1090 restriction.
1092 We need to be careful here; in some testcases, e.g. directives.c in
1093 libcpp, we can estimate a self-recursive function to have negative growth
1094 for inlining completely. */
1096 if (!edge->count)
1098 where = edge->caller;
1099 while (where->global.inlined_to)
1101 if (where->decl == edge->callee->decl)
1102 break;
1103 where = where->callers->caller;
1105 if (where->global.inlined_to)
1107 edge->inline_failed
1108 = (edge->callee->local.disregard_inline_limits
1109 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1110 if (dump_file)
1111 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1112 continue;
1116 if (edge->callee->local.disregard_inline_limits)
1118 else if (!cgraph_maybe_hot_edge_p (edge))
1119 not_good = CIF_UNLIKELY_CALL;
1120 else if (!flag_inline_functions
1121 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1122 not_good = CIF_NOT_DECLARED_INLINED;
1123 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1124 not_good = CIF_OPTIMIZING_FOR_SIZE;
1125 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1127 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1128 &edge->inline_failed))
1130 edge->inline_failed = not_good;
1131 if (dump_file)
1132 fprintf (dump_file, " inline_failed:%s.\n",
1133 cgraph_inline_failed_string (edge->inline_failed));
1135 continue;
1137 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1139 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1140 &edge->inline_failed))
1142 if (dump_file)
1143 fprintf (dump_file, " inline_failed:%s.\n",
1144 cgraph_inline_failed_string (edge->inline_failed));
1146 continue;
1148 if (!tree_can_inline_p (edge))
1150 if (dump_file)
1151 fprintf (dump_file, " inline_failed:%s.\n",
1152 cgraph_inline_failed_string (edge->inline_failed));
1153 continue;
1155 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1156 &edge->inline_failed))
1158 where = edge->caller;
1159 if (where->global.inlined_to)
1160 where = where->global.inlined_to;
1161 if (!cgraph_decide_recursive_inlining (where,
1162 flag_indirect_inlining
1163 ? &new_indirect_edges : NULL))
1164 continue;
1165 if (flag_indirect_inlining)
1166 add_new_edges_to_heap (heap, new_indirect_edges);
1167 update_callee_keys (heap, where, updated_nodes);
1169 else
1171 struct cgraph_node *callee;
1172 if (edge->call_stmt_cannot_inline_p
1173 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1174 &edge->inline_failed, true))
1176 if (dump_file)
1177 fprintf (dump_file, " Not inlining into %s:%s.\n",
1178 cgraph_node_name (edge->caller),
1179 cgraph_inline_failed_string (edge->inline_failed));
1180 continue;
1182 callee = edge->callee;
1183 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1184 if (flag_indirect_inlining)
1185 add_new_edges_to_heap (heap, new_indirect_edges);
1187 update_callee_keys (heap, callee, updated_nodes);
1189 where = edge->caller;
1190 if (where->global.inlined_to)
1191 where = where->global.inlined_to;
1193 /* Our profitability metric can depend on local properties
1194 such as the number of inlinable calls and the size of the function body.
1195 After inlining, these properties might change for the function we
1196 inlined into (since its body size changed) and for the functions
1197 called by the function we inlined (since the number of their inlinable
1198 callers might change). */
1199 update_caller_keys (heap, where, updated_nodes);
1201 /* We removed one call of the function we just inlined. If the offline
1202 copy is still needed, be sure to update the keys. */
1203 if (callee != where && !callee->global.inlined_to)
1204 update_caller_keys (heap, callee, updated_nodes);
1205 bitmap_clear (updated_nodes);
1207 if (dump_file)
1209 fprintf (dump_file,
1210 " Inlined into %s which now has size %i and self time %i,"
1211 "net change of %+i.\n",
1212 cgraph_node_name (edge->caller),
1213 edge->caller->global.time,
1214 edge->caller->global.size,
1215 overall_size - old_size);
1217 if (min_size > overall_size)
1219 min_size = overall_size;
1220 max_size = compute_max_insns (min_size);
1222 if (dump_file)
1223 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1226 while (!fibheap_empty (heap))
1228 int badness = fibheap_min_key (heap);
1230 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1231 gcc_assert (edge->aux);
1232 edge->aux = NULL;
1233 if (!edge->inline_failed)
1234 continue;
1235 #ifdef ENABLE_CHECKING
1236 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1237 #endif
1238 if (dump_file)
1240 fprintf (dump_file,
1241 "\nSkipping %s with %i size\n",
1242 cgraph_node_name (edge->callee),
1243 edge->callee->global.size);
1244 fprintf (dump_file,
1245 " called by %s in %s:%i\n"
1246 " Estimated growth after inlined into all callees is %+i insns.\n"
1247 " Estimated badness is %i, frequency %.2f.\n",
1248 cgraph_node_name (edge->caller),
1249 flag_wpa ? "unknown"
1250 : gimple_filename ((const_gimple) edge->call_stmt),
1251 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1252 cgraph_estimate_growth (edge->callee),
1253 badness,
1254 edge->frequency / (double)CGRAPH_FREQ_BASE);
1255 if (edge->count)
1256 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1257 if (dump_flags & TDF_DETAILS)
1258 cgraph_edge_badness (edge, true);
1260 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1261 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1262 &edge->inline_failed))
1263 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1266 if (new_indirect_edges)
1267 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1268 fibheap_delete (heap);
1269 BITMAP_FREE (updated_nodes);
1272 /* Flatten NODE from the IPA inliner. */
1274 static void
1275 cgraph_flatten (struct cgraph_node *node)
1277 struct cgraph_edge *e;
1279 /* We shouldn't be called recursively when we are being processed. */
1280 gcc_assert (node->aux == NULL);
1282 node->aux = (void *)(size_t) INLINE_ALL;
1284 for (e = node->callees; e; e = e->next_callee)
1286 struct cgraph_node *orig_callee;
1288 if (e->call_stmt_cannot_inline_p)
1289 continue;
1291 if (!e->callee->analyzed)
1293 if (dump_file)
1294 fprintf (dump_file,
1295 "Not inlining: Function body not available.\n");
1296 continue;
1299 /* We've hit cycle? It is time to give up. */
1300 if (e->callee->aux)
1302 if (dump_file)
1303 fprintf (dump_file,
1304 "Not inlining %s into %s to avoid cycle.\n",
1305 cgraph_node_name (e->callee),
1306 cgraph_node_name (e->caller));
1307 e->inline_failed = CIF_RECURSIVE_INLINING;
1308 continue;
1311 /* When the edge is already inlined, we just need to recurse into
1312 it in order to fully flatten the leaves. */
1313 if (!e->inline_failed)
1315 cgraph_flatten (e->callee);
1316 continue;
1319 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1321 if (dump_file)
1322 fprintf (dump_file, "Not inlining: recursive call.\n");
1323 continue;
1326 if (!tree_can_inline_p (e))
1328 if (dump_file)
1329 fprintf (dump_file, "Not inlining: %s",
1330 cgraph_inline_failed_string (e->inline_failed));
1331 continue;
1334 /* Inline the edge and flatten the inline clone. Avoid
1335 recursing through the original node if the node was cloned. */
1336 if (dump_file)
1337 fprintf (dump_file, " Inlining %s into %s.\n",
1338 cgraph_node_name (e->callee),
1339 cgraph_node_name (e->caller));
1340 orig_callee = e->callee;
1341 cgraph_mark_inline_edge (e, true, NULL);
1342 if (e->callee != orig_callee)
1343 orig_callee->aux = (void *)(size_t) INLINE_ALL;
1344 cgraph_flatten (e->callee);
1345 if (e->callee != orig_callee)
1346 orig_callee->aux = NULL;
1349 node->aux = NULL;
1352 /* Decide on the inlining. We do so in topological order to avoid
1353 the expense of updating data structures. */
1355 static unsigned int
1356 cgraph_decide_inlining (void)
1358 struct cgraph_node *node;
1359 int nnodes;
1360 struct cgraph_node **order =
1361 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1362 int old_size = 0;
1363 int i;
1364 int initial_size = 0;
1366 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1367 if (in_lto_p && flag_indirect_inlining)
1368 ipa_update_after_lto_read ();
1369 if (flag_indirect_inlining)
1370 ipa_create_all_structures_for_iinln ();
1372 max_count = 0;
1373 max_benefit = 0;
1374 for (node = cgraph_nodes; node; node = node->next)
1375 if (node->analyzed)
1377 struct cgraph_edge *e;
1379 gcc_assert (inline_summary (node)->self_size == node->global.size);
1380 initial_size += node->global.size;
1381 for (e = node->callees; e; e = e->next_callee)
1382 if (max_count < e->count)
1383 max_count = e->count;
1384 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1385 max_benefit = inline_summary (node)->time_inlining_benefit;
1387 gcc_assert (in_lto_p
1388 || !max_count
1389 || (profile_info && flag_branch_probabilities));
1390 overall_size = initial_size;
1392 nnodes = cgraph_postorder (order);
1394 if (dump_file)
1395 fprintf (dump_file,
1396 "\nDeciding on inlining. Starting with size %i.\n",
1397 initial_size);
1399 for (node = cgraph_nodes; node; node = node->next)
1400 node->aux = 0;
1402 if (dump_file)
1403 fprintf (dump_file, "\nFlattening functions:\n");
1405 /* In the first pass handle functions to be flattened. Do this with
1406 a priority so none of our later choices will make this impossible. */
1407 for (i = nnodes - 1; i >= 0; i--)
1409 node = order[i];
1411 /* Handle nodes to be flattened, but don't update overall unit
1412 size. Calling the incremental inliner here is lame,
1413 a simple worklist should be enough. What should be left
1414 here from the early inliner (if it runs) is cyclic cases.
1415 Ideally when processing callees we stop inlining at the
1416 entry of cycles, possibly cloning that entry point and
1417 trying to flatten it, turning it into a self-recursive
1418 function. */
1419 if (lookup_attribute ("flatten",
1420 DECL_ATTRIBUTES (node->decl)) != NULL)
1422 if (dump_file)
1423 fprintf (dump_file,
1424 "Flattening %s\n", cgraph_node_name (node));
1425 cgraph_flatten (node);
1429 cgraph_decide_inlining_of_small_functions ();
1431 if (flag_inline_functions_called_once)
1433 if (dump_file)
1434 fprintf (dump_file, "\nDeciding on functions called once:\n");
1436 /* And finally decide what functions are called once. */
1437 for (i = nnodes - 1; i >= 0; i--)
1439 node = order[i];
1441 if (node->callers
1442 && !node->callers->next_caller
1443 && cgraph_only_called_directly_p (node)
1444 && node->local.inlinable
1445 && node->callers->inline_failed
1446 && node->callers->caller != node
1447 && node->callers->caller->global.inlined_to != node
1448 && !node->callers->call_stmt_cannot_inline_p
1449 && !DECL_EXTERNAL (node->decl)
1450 && !DECL_COMDAT (node->decl))
1452 cgraph_inline_failed_t reason;
1453 old_size = overall_size;
1454 if (dump_file)
1456 fprintf (dump_file,
1457 "\nConsidering %s size %i.\n",
1458 cgraph_node_name (node), node->global.size);
1459 fprintf (dump_file,
1460 " Called once from %s %i insns.\n",
1461 cgraph_node_name (node->callers->caller),
1462 node->callers->caller->global.size);
1465 if (cgraph_check_inline_limits (node->callers->caller, node,
1466 &reason, false))
1468 struct cgraph_node *caller = node->callers->caller;
1469 cgraph_mark_inline (node->callers);
1470 if (dump_file)
1471 fprintf (dump_file,
1472 " Inlined into %s which now has %i size"
1473 " for a net change of %+i size.\n",
1474 cgraph_node_name (caller),
1475 caller->global.size,
1476 overall_size - old_size);
1478 else
1480 if (dump_file)
1481 fprintf (dump_file,
1482 " Not inlining: %s.\n",
1483 cgraph_inline_failed_string (reason));
1489 /* Free ipa-prop structures if they are no longer needed. */
1490 if (flag_indirect_inlining)
1491 ipa_free_all_structures_after_iinln ();
1493 if (dump_file)
1494 fprintf (dump_file,
1495 "\nInlined %i calls, eliminated %i functions, "
1496 "size %i turned to %i size.\n\n",
1497 ncalls_inlined, nfunctions_inlined, initial_size,
1498 overall_size);
1499 free (order);
1500 return 0;
1503 /* Return true when N is leaf function. Accept cheap (pure&const) builtins
1504 in leaf functions. */
1505 static bool
1506 leaf_node_p (struct cgraph_node *n)
1508 struct cgraph_edge *e;
1509 for (e = n->callees; e; e = e->next_callee)
1510 if (!DECL_BUILT_IN (e->callee->decl)
1511 || (!TREE_READONLY (e->callee->decl)
1512 || DECL_PURE_P (e->callee->decl)))
1513 return false;
1514 return true;
1517 /* Decide on the inlining. We do so in the topological order to avoid
1518 expenses on updating data structures. */
1520 static bool
1521 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1522 enum inlining_mode mode)
1524 struct cgraph_edge *e;
1525 bool inlined = false;
1526 cgraph_inline_failed_t failed_reason;
1528 #ifdef ENABLE_CHECKING
1529 verify_cgraph_node (node);
1530 #endif
1532 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1533 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1535 if (dump_file)
1536 fprintf (dump_file, "Incrementally flattening %s\n",
1537 cgraph_node_name (node));
1538 mode = INLINE_ALL;
1541 /* First of all look for always inline functions. */
1542 if (mode != INLINE_SIZE_NORECURSIVE)
1543 for (e = node->callees; e; e = e->next_callee)
1545 if (!e->callee->local.disregard_inline_limits
1546 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1547 continue;
1548 if (e->call_stmt_cannot_inline_p)
1549 continue;
1550 if (dump_file)
1551 fprintf (dump_file,
1552 "Considering to always inline inline candidate %s.\n",
1553 cgraph_node_name (e->callee));
1554 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1556 if (dump_file)
1557 fprintf (dump_file, "Not inlining: recursive call.\n");
1558 continue;
1560 if (!tree_can_inline_p (e))
1562 if (dump_file)
1563 fprintf (dump_file,
1564 "Not inlining: %s",
1565 cgraph_inline_failed_string (e->inline_failed));
1566 continue;
1568 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1569 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1571 if (dump_file)
1572 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1573 continue;
1575 if (!e->callee->analyzed)
1577 if (dump_file)
1578 fprintf (dump_file,
1579 "Not inlining: Function body no longer available.\n");
1580 continue;
1583 if (dump_file)
1584 fprintf (dump_file, " Inlining %s into %s.\n",
1585 cgraph_node_name (e->callee),
1586 cgraph_node_name (e->caller));
1587 cgraph_mark_inline (e);
1588 inlined = true;
1591 /* Now do the automatic inlining. */
1592 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1593 /* Never inline regular functions into always-inline functions
1594 during incremental inlining. */
1595 && !node->local.disregard_inline_limits)
1597 bitmap visited = BITMAP_ALLOC (NULL);
1598 for (e = node->callees; e; e = e->next_callee)
1600 int allowed_growth = 0;
1601 if (!e->callee->local.inlinable
1602 || !e->inline_failed
1603 || e->callee->local.disregard_inline_limits)
1604 continue;
1605 /* We are inlining a function to all call-sites in node
1606 or to none. So visit each candidate only once. */
1607 if (!bitmap_set_bit (visited, e->callee->uid))
1608 continue;
1609 if (dump_file)
1610 fprintf (dump_file, "Considering inline candidate %s.\n",
1611 cgraph_node_name (e->callee));
1612 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1614 if (dump_file)
1615 fprintf (dump_file, "Not inlining: recursive call.\n");
1616 continue;
1618 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1619 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1621 if (dump_file)
1622 fprintf (dump_file,
1623 "Not inlining: SSA form does not match.\n");
1624 continue;
1627 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1628 && optimize_function_for_speed_p (cfun))
1629 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1631 /* When the function body would grow and inlining the function
1632 won't eliminate the need for offline copy of the function,
1633 don't inline. */
1634 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1635 || (!flag_inline_functions
1636 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1637 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1638 > e->caller->global.size + allowed_growth)
1639 && cgraph_estimate_growth (e->callee) > allowed_growth)
1641 if (dump_file)
1642 fprintf (dump_file,
1643 "Not inlining: code size would grow by %i.\n",
1644 cgraph_estimate_size_after_inlining (1, e->caller,
1645 e->callee)
1646 - e->caller->global.size);
1647 continue;
1649 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1650 false)
1651 || e->call_stmt_cannot_inline_p)
1653 if (dump_file)
1654 fprintf (dump_file, "Not inlining: %s.\n",
1655 cgraph_inline_failed_string (e->inline_failed));
1656 continue;
1658 if (!e->callee->analyzed)
1660 if (dump_file)
1661 fprintf (dump_file,
1662 "Not inlining: Function body no longer available.\n");
1663 continue;
1665 if (!tree_can_inline_p (e))
1667 if (dump_file)
1668 fprintf (dump_file,
1669 "Not inlining: %s.",
1670 cgraph_inline_failed_string (e->inline_failed));
1671 continue;
1673 if (cgraph_default_inline_p (e->callee, &failed_reason))
1675 if (dump_file)
1676 fprintf (dump_file, " Inlining %s into %s.\n",
1677 cgraph_node_name (e->callee),
1678 cgraph_node_name (e->caller));
1679 cgraph_mark_inline (e);
1680 inlined = true;
1683 BITMAP_FREE (visited);
1685 return inlined;
1688 /* Because inlining might remove no-longer-reachable nodes, we need to
1689 keep the array visible to the garbage collector to avoid reading
1690 collected-out nodes. */
1691 static int nnodes;
1692 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1694 /* Do inlining of small functions. Doing so early helps profiling and other
1695 passes to be somewhat more effective and avoids some code duplication in
1696 later real inlining pass for testcases with very many function calls. */
1697 static unsigned int
1698 cgraph_early_inlining (void)
1700 struct cgraph_node *node = cgraph_node (current_function_decl);
1701 unsigned int todo = 0;
1702 int iterations = 0;
1704 if (seen_error ())
1705 return 0;
1707 if (!optimize
1708 || flag_no_inline
1709 || !flag_early_inlining)
1711 /* When not optimizing or not inlining, inline only always-inline
1712 functions. */
1713 cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
1714 timevar_push (TV_INTEGRATION);
1715 todo |= optimize_inline_calls (current_function_decl);
1716 timevar_pop (TV_INTEGRATION);
1718 else
1720 if (lookup_attribute ("flatten",
1721 DECL_ATTRIBUTES (node->decl)) != NULL)
1723 if (dump_file)
1724 fprintf (dump_file,
1725 "Flattening %s\n", cgraph_node_name (node));
1726 cgraph_flatten (node);
1727 timevar_push (TV_INTEGRATION);
1728 todo |= optimize_inline_calls (current_function_decl);
1729 timevar_pop (TV_INTEGRATION);
1731 /* We iterate incremental inlining to get trivial cases of indirect
1732 inlining. */
1733 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1734 && cgraph_decide_inlining_incrementally (node,
1735 iterations
1736 ? INLINE_SIZE_NORECURSIVE
1737 : INLINE_SIZE))
1739 timevar_push (TV_INTEGRATION);
1740 todo |= optimize_inline_calls (current_function_decl);
1741 iterations++;
1742 timevar_pop (TV_INTEGRATION);
1744 if (dump_file)
1745 fprintf (dump_file, "Iterations: %i\n", iterations);
1748 cfun->always_inline_functions_inlined = true;
1750 return todo;
1753 struct gimple_opt_pass pass_early_inline =
1756 GIMPLE_PASS,
1757 "einline", /* name */
1758 NULL, /* gate */
1759 cgraph_early_inlining, /* execute */
1760 NULL, /* sub */
1761 NULL, /* next */
1762 0, /* static_pass_number */
1763 TV_INLINE_HEURISTICS, /* tv_id */
1764 0, /* properties_required */
1765 0, /* properties_provided */
1766 0, /* properties_destroyed */
1767 0, /* todo_flags_start */
1768 TODO_dump_func /* todo_flags_finish */
1772 /* When inlining shall be performed. */
1773 static bool
1774 cgraph_gate_ipa_early_inlining (void)
1776 return (flag_early_inlining
1777 && !in_lto_p
1778 && (flag_branch_probabilities || flag_test_coverage
1779 || profile_arc_flag));
1782 /* IPA pass wrapper for the early inlining pass. We need to run early inlining
1783 before tree profiling, so we have a stand-alone IPA pass for doing so. */
1784 struct simple_ipa_opt_pass pass_ipa_early_inline =
1787 SIMPLE_IPA_PASS,
1788 "einline_ipa", /* name */
1789 cgraph_gate_ipa_early_inlining, /* gate */
1790 NULL, /* execute */
1791 NULL, /* sub */
1792 NULL, /* next */
1793 0, /* static_pass_number */
1794 TV_INLINE_HEURISTICS, /* tv_id */
1795 0, /* properties_required */
1796 0, /* properties_provided */
1797 0, /* properties_destroyed */
1798 0, /* todo_flags_start */
1799 TODO_dump_cgraph /* todo_flags_finish */
1803 /* See if the statement might disappear after inlining. We are not terribly
1804 sophisticated; basically we look for simple abstraction penalty wrappers. */
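/* For example, a statement such as "tmp_1 = (int) param_2;" or
   "return retval_3;" typically disappears once the body is inlined, while
   real arithmetic such as "x_4 = y_5 + z_6;" does not; only the former
   contribute to the inlining benefit.  */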
1806 static bool
1807 likely_eliminated_by_inlining_p (gimple stmt)
1809 enum gimple_code code = gimple_code (stmt);
1810 switch (code)
1812 case GIMPLE_RETURN:
1813 return true;
1814 case GIMPLE_ASSIGN:
1815 if (gimple_num_ops (stmt) != 2)
1816 return false;
1818 /* Casts of parameters, loads from parameters passed by reference
1819 and stores to return value or parameters are probably free after
1820 inlining. */
1821 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1822 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1823 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1824 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1826 tree rhs = gimple_assign_rhs1 (stmt);
1827 tree lhs = gimple_assign_lhs (stmt);
1828 tree inner_rhs = rhs;
1829 tree inner_lhs = lhs;
1830 bool rhs_free = false;
1831 bool lhs_free = false;
1833 while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
1834 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1835 while (handled_component_p (inner_rhs)
1836 || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
1837 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1840 if (TREE_CODE (inner_rhs) == PARM_DECL
1841 || (TREE_CODE (inner_rhs) == SSA_NAME
1842 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1843 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1844 rhs_free = true;
1845 if (rhs_free && is_gimple_reg (lhs))
1846 lhs_free = true;
1847 if (((TREE_CODE (inner_lhs) == PARM_DECL
1848 || (TREE_CODE (inner_lhs) == SSA_NAME
1849 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1850 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1851 && inner_lhs != lhs)
1852 || TREE_CODE (inner_lhs) == RESULT_DECL
1853 || (TREE_CODE (inner_lhs) == SSA_NAME
1854 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1855 lhs_free = true;
1856 if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1857 rhs_free = true;
1858 if (lhs_free && rhs_free)
1859 return true;
1861 return false;
1862 default:
1863 return false;
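/* Editorial illustration (hypothetical GIMPLE, not from this file): for a
   trivial accessor

     struct point { int x, y; };

     int
     point_x (struct point *p)
     {
       return p->x;
     }

   the body is roughly

     D.1 = p_1(D)->x;
     return D.1;

   The load is rooted at the default-definition SSA name of parameter P, so
   the assignment counts as likely eliminated, and GIMPLE_RETURN is always
   considered free; nearly the whole body therefore shows up as inlining
   benefit in estimate_function_body_sizes below.  */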
1867 /* Compute function body size parameters for NODE. */
1869 static void
1870 estimate_function_body_sizes (struct cgraph_node *node)
1872 gcov_type time = 0;
1873 gcov_type time_inlining_benefit = 0;
1874 int size = 0;
1875 int size_inlining_benefit = 0;
1876 basic_block bb;
1877 gimple_stmt_iterator bsi;
1878 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1879 tree arg;
1880 int freq;
1881 tree funtype = TREE_TYPE (node->decl);
1883 if (dump_file)
1884 fprintf (dump_file, "Analyzing function body size: %s\n",
1885 cgraph_node_name (node));
1887 gcc_assert (my_function && my_function->cfg);
1888 FOR_EACH_BB_FN (bb, my_function)
1890 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1891 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1893 gimple stmt = gsi_stmt (bsi);
1894 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1895 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1897 if (dump_file && (dump_flags & TDF_DETAILS))
1899 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1900 freq, this_size, this_time);
1901 print_gimple_stmt (dump_file, stmt, 0, 0);
1903 this_time *= freq;
1904 time += this_time;
1905 size += this_size;
1906 if (likely_eliminated_by_inlining_p (stmt))
1908 size_inlining_benefit += this_size;
1909 time_inlining_benefit += this_time;
1910 if (dump_file && (dump_flags & TDF_DETAILS))
1911 fprintf (dump_file, " Likely eliminated\n");
1913 gcc_assert (time >= 0);
1914 gcc_assert (size >= 0);
1917 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1918 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1919 / CGRAPH_FREQ_BASE);
1920 if (dump_file)
1921 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1922 (int)time, (int)time_inlining_benefit,
1923 size, size_inlining_benefit);
1924 time_inlining_benefit += eni_time_weights.call_cost;
1925 size_inlining_benefit += eni_size_weights.call_cost;
1926 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1928 int cost = estimate_move_cost (TREE_TYPE (funtype));
1929 time_inlining_benefit += cost;
1930 size_inlining_benefit += cost;
1932 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1933 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1935 int cost = estimate_move_cost (TREE_TYPE (arg));
1936 time_inlining_benefit += cost;
1937 size_inlining_benefit += cost;
1939 if (time_inlining_benefit > MAX_TIME)
1940 time_inlining_benefit = MAX_TIME;
1941 if (time > MAX_TIME)
1942 time = MAX_TIME;
1943 inline_summary (node)->self_time = time;
1944 inline_summary (node)->self_size = size;
1945 if (dump_file)
1946 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1947 (int)time, (int)time_inlining_benefit,
1948 size, size_inlining_benefit);
1949 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1950 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
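/* Editorial worked example (hypothetical numbers; the eni_* weights and
   estimate_move_cost results are target- and configuration-dependent):
   assume a body whose statements add up to SIZE = 6 and, after weighting
   each statement by its basic-block frequency, TIME = 6 * CGRAPH_FREQ_BASE,
   all of it classified as likely eliminated.  The rounding division above
   yields TIME = TIME_INLINING_BENEFIT = 6.  With a call cost of 1, an int
   return value and a single int argument each with a move cost of 1, the
   final benefits become 6 + 1 + 1 + 1 = 9 for both size and time, which is
   what the "With function call overhead" dump line would report.  */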
1953 /* Compute the parameters of NODE that are used by the inliner. */
1954 unsigned int
1955 compute_inline_parameters (struct cgraph_node *node)
1957 HOST_WIDE_INT self_stack_size;
1959 gcc_assert (!node->global.inlined_to);
1961 /* Estimate the stack size for the function, but not at -O0,
1962 because estimated_stack_frame_size has quadratic complexity. */
1963 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
1964 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1965 node->global.estimated_stack_size = self_stack_size;
1966 node->global.stack_frame_offset = 0;
1968 /* Can this function be inlined at all? */
1969 node->local.inlinable = tree_inlinable_function_p (node->decl);
1970 if (node->local.inlinable && !node->local.disregard_inline_limits)
1971 node->local.disregard_inline_limits
1972 = DECL_DISREGARD_INLINE_LIMITS (node->decl);
1973 estimate_function_body_sizes (node);
1974 /* Inlining characteristics are maintained by cgraph_mark_inline. */
1975 node->global.time = inline_summary (node)->self_time;
1976 node->global.size = inline_summary (node)->self_size;
1977 return 0;
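/* Editorial note (assumption): right after this pass NODE->global.size and
   NODE->global.time are simply copies of the self_* summaries computed
   above; they start to diverge only once cgraph_mark_inline accounts for
   inlined callee bodies, so a function with self_size 20 that later inlines
   callees will see global.size grow while self_size stays at 20.  */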
1981 /* Compute the inliner parameters for the function given by
1982 current_function_decl. */
1983 static unsigned int
1984 compute_inline_parameters_for_current (void)
1986 compute_inline_parameters (cgraph_node (current_function_decl));
1987 return 0;
1990 struct gimple_opt_pass pass_inline_parameters =
1993 GIMPLE_PASS,
1994 "inline_param", /* name */
1995 NULL, /* gate */
1996 compute_inline_parameters_for_current,/* execute */
1997 NULL, /* sub */
1998 NULL, /* next */
1999 0, /* static_pass_number */
2000 TV_INLINE_HEURISTICS, /* tv_id */
2001 0, /* properties_required */
2002 0, /* properties_provided */
2003 0, /* properties_destroyed */
2004 0, /* todo_flags_start */
2005 0 /* todo_flags_finish */
2009 /* Perform the intraprocedural analysis of NODE that is required to be able
2010 to inline indirect calls made from it. */
2011 static void
2012 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
2014 ipa_initialize_node_params (node);
2015 ipa_detect_param_modifications (node);
2016 ipa_analyze_params_uses (node);
2017 ipa_compute_jump_functions (node);
2019 if (dump_file)
2021 ipa_print_node_params (dump_file, node);
2022 ipa_print_node_jump_functions (dump_file, node);
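/* Editorial illustration (hypothetical source, not from this file): the
   parameter and jump-function analysis above is what later allows
   -findirect-inlining to resolve an indirect call.  Given

     static int add1 (int x) { return x + 1; }

     static int
     apply (int (*fn) (int), int v)
     {
       return fn (v);
     }

     int
     caller (int v)
     {
       return apply (add1, v);
     }

   the jump function for the call to APPLY records that FN is the constant
   ADD1, so once APPLY is inlined into CALLER the indirect call "fn (v)" can
   be turned into a direct call to ADD1 and inlined in turn.  */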
2026 /* Analyze NODE: compute inline parameters and indirect inlining information. */
2027 static void
2028 analyze_function (struct cgraph_node *node)
2030 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2031 current_function_decl = node->decl;
2033 compute_inline_parameters (node);
2034 if (flag_indirect_inlining)
2035 inline_indirect_intraprocedural_analysis (node);
2037 current_function_decl = NULL;
2038 pop_cfun ();
2041 /* Called when a new function is inserted into the callgraph late. */
2042 static void
2043 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
2045 analyze_function (node);
2048 /* Produce the inline summaries for all analyzed functions in the callgraph. */
2049 static void
2050 inline_generate_summary (void)
2052 struct cgraph_node *node;
2054 function_insertion_hook_holder =
2055 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2057 if (flag_indirect_inlining)
2059 ipa_register_cgraph_hooks ();
2060 ipa_check_create_node_params ();
2061 ipa_check_create_edge_args ();
2064 for (node = cgraph_nodes; node; node = node->next)
2065 if (node->analyzed)
2066 analyze_function (node);
2068 return;
2071 /* Apply inline plan to function. */
2072 static unsigned int
2073 inline_transform (struct cgraph_node *node)
2075 unsigned int todo = 0;
2076 struct cgraph_edge *e;
2077 bool inline_p = false;
2079 /* FIXME: Currently the pass manager adds the inline transform more than once to
2080 some clones. This needs revisiting after the WPA cleanups. */
2081 if (cfun->after_inlining)
2082 return 0;
2084 /* We might need the body of this function so that we can expand
2085 it inline somewhere else. */
2086 if (cgraph_preserve_function_body_p (node->decl))
2087 save_inline_function_body (node);
2089 for (e = node->callees; e; e = e->next_callee)
2091 cgraph_redirect_edge_call_stmt_to_callee (e);
2092 if (!e->inline_failed || warn_inline)
2093 inline_p = true;
2096 if (inline_p)
2098 timevar_push (TV_INTEGRATION);
2099 todo = optimize_inline_calls (current_function_decl);
2100 timevar_pop (TV_INTEGRATION);
2102 cfun->always_inline_functions_inlined = true;
2103 cfun->after_inlining = true;
2104 return todo | execute_fixup_cfg ();
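/* Editorial note (assumption): the loop above first redirects every call
   statement to the callee chosen by the inline plan (calls in clones
   produced by IPA passes may still point at the original decl) and only
   then runs optimize_inline_calls; the warn_inline check makes the
   expander run even when nothing was inlined, presumably so that -Winline
   diagnostics about calls that could not be inlined can still be emitted.  */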
2107 /* Read the inline summary. Jump functions are shared between ipa-cp
2108 and the inliner, so when ipa-cp is active we do not need to read them
2109 here a second time. */
2111 static void
2112 inline_read_summary (void)
2114 if (flag_indirect_inlining)
2116 ipa_register_cgraph_hooks ();
2117 if (!flag_ipa_cp)
2118 ipa_prop_read_jump_functions ();
2120 function_insertion_hook_holder =
2121 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2124 /* Write the inline summary for the nodes in SET.
2125 Jump functions are shared between ipa-cp and the inliner, so when ipa-cp is
2126 active we do not need to write them twice. */
2128 static void
2129 inline_write_summary (cgraph_node_set set,
2130 varpool_node_set vset ATTRIBUTE_UNUSED)
2132 if (flag_indirect_inlining && !flag_ipa_cp)
2133 ipa_prop_write_jump_functions (set);
2136 /* When to run IPA inlining. Inlining of always-inline functions
2137 happens during early inlining. */
2139 static bool
2140 gate_cgraph_decide_inlining (void)
2142 /* ??? We'd like to skip this pass when not optimizing or not inlining, as
2143 all always-inline functions have already been processed by early
2144 inlining. But doing so breaks EH with C++, at least, because
2145 fixup_cfg must be run unconditionally even at -O0.
2146 So leave the pass enabled unconditionally for now. */
2147 return 1;
2150 struct ipa_opt_pass_d pass_ipa_inline =
2153 IPA_PASS,
2154 "inline", /* name */
2155 gate_cgraph_decide_inlining, /* gate */
2156 cgraph_decide_inlining, /* execute */
2157 NULL, /* sub */
2158 NULL, /* next */
2159 0, /* static_pass_number */
2160 TV_INLINE_HEURISTICS, /* tv_id */
2161 0, /* properties_required */
2162 0, /* properties_provided */
2163 0, /* properties_destroyed */
2164 TODO_remove_functions, /* todo_flags_start */
2165 TODO_dump_cgraph | TODO_dump_func
2166 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2168 inline_generate_summary, /* generate_summary */
2169 inline_write_summary, /* write_summary */
2170 inline_read_summary, /* read_summary */
2171 NULL, /* write_optimization_summary */
2172 NULL, /* read_optimization_summary */
2173 NULL, /* stmt_fixup */
2174 0, /* TODOs */
2175 inline_transform, /* function_transform */
2176 NULL, /* variable_transform */
2180 #include "gt-ipa-inline.h"