1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
/* Inlining decision heuristics

   We separate the inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation for the particular representation of inline plans in the
   callgraph.
   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function marks a given call inline and performs the necessary
     modifications of the cgraph (producing the clones and updating the
     overall statistics).

   inlining heuristics limits

     These functions check that a particular inlining is allowed by the
     limits specified by the user (allowed function growth, overall unit
     growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass that aims to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

   The implementation of the particular heuristics is separated from the
   rest of the code to make it easier to replace it with a more complicated
   implementation in the future.  The rest of the inlining code acts as a
   library aimed at modifying the callgraph and verifying that the limits
   on code size growth are respected.

   To mark a given call inline, use the cgraph_mark_inline function; the
   verification is performed by cgraph_default_inline_p and
   cgraph_check_inline_limits.
   The heuristic implements a simple knapsack-style algorithm, ordering
   all functions by their "profitability" (estimated by code size growth)
   and inlining them in priority order.

   cgraph_decide_inlining implements the heuristic taking the whole
   callgraph into account, while cgraph_decide_inlining_incrementally
   considers only one function at a time and is used by the early inliner.
   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes the local properties of functions that are used by
     the inliner: estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (i.e. run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied to the function body.
   pass_early_inlining

     Simple local inlining pass inlining callees into the current function.
     This pass makes no global whole-compilation-unit analysis, and thus
     when it is allowed to do inlining that expands code size it might
     result in unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (which is maintained by
     the pass manager).  Functions are early optimized after inlining, so
     the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold the
     C++ abstraction penalty effectively and cheaply.
   pass_ipa_early_inlining

     With profiling, early inlining is also necessary to reduce
     instrumentation costs on programs with a high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper around pass_early_inlining and ensures that
     inlining happens first.
   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm that
     inlines small functions (which results in overall growth of the
     compilation unit) and functions called once.  The pass computes just
     the so-called inline plan (the representation of the inlining to be
     done in the callgraph) and, unlike early inlining, it does not perform
     the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline on
     a given function.  Possibly the function body before inlining is saved
     when it is needed for further inlining later.  */
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "tm.h"
125 #include "tree.h"
126 #include "tree-inline.h"
127 #include "langhooks.h"
128 #include "flags.h"
129 #include "cgraph.h"
130 #include "diagnostic.h"
131 #include "gimple-pretty-print.h"
132 #include "timevar.h"
133 #include "params.h"
134 #include "fibheap.h"
135 #include "intl.h"
136 #include "tree-pass.h"
137 #include "hashtab.h"
138 #include "coverage.h"
139 #include "ggc.h"
140 #include "tree-flow.h"
141 #include "rtl.h"
142 #include "ipa-prop.h"
143 #include "except.h"
145 #define MAX_TIME 1000000000
/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce the function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
157 enum inlining_mode {
158 INLINE_NONE = 0,
159 INLINE_ALWAYS_INLINE,
160 INLINE_SIZE_NORECURSIVE,
161 INLINE_SIZE,
162 INLINE_ALL
165 static bool
166 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
167 static void cgraph_flatten (struct cgraph_node *node);
170 /* Statistics we collect about inlining algorithm. */
171 static int ncalls_inlined;
172 static int nfunctions_inlined;
173 static int overall_size;
174 static gcov_type max_count, max_benefit;
176 /* Holders of ipa cgraph hooks: */
177 static struct cgraph_node_hook_list *function_insertion_hook_holder;
179 static inline struct inline_summary *
180 inline_summary (struct cgraph_node *node)
182 return &node->local.inline_summary;
185 /* Estimate self time of the function after inlining WHAT into TO. */
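/* The estimate scales WHAT's self time, minus its expected inlining
   benefit, by the call-site FREQUENCY relative to CGRAPH_FREQ_BASE
   (rounding to nearest) and adds TO's current time; the result is
   clamped to the range [0, MAX_TIME].  For example, a callee with time
   20 and benefit 4 called with frequency CGRAPH_FREQ_BASE / 2
   contributes roughly 8 time units.  */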
187 static int
188 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
189 struct cgraph_node *what)
191 gcov_type time = (((gcov_type)what->global.time
192 - inline_summary (what)->time_inlining_benefit)
193 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
194 + to->global.time;
195 if (time < 0)
196 time = 0;
197 if (time > MAX_TIME)
198 time = MAX_TIME;
199 return time;
/* Estimate the self size of the function after inlining WHAT into TO.  */
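/* The size estimate is (WHAT's size minus its size inlining benefit)
   multiplied by TIMES, the number of call sites being inlined, plus
   TO's current size; the result is asserted to be non-negative.  */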
204 static inline int
205 cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
206 struct cgraph_node *what)
208 int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
209 * times + to->global.size);
210 gcc_assert (size >= 0);
211 return size;
214 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
215 by NEST. */
217 static void
218 update_noncloned_frequencies (struct cgraph_node *node,
219 int freq_scale, int nest)
221 struct cgraph_edge *e;
223 /* We do not want to ignore high loop nest after freq drops to 0. */
224 if (!freq_scale)
225 freq_scale = 1;
226 for (e = node->callees; e; e = e->next_callee)
228 e->loop_nest += nest;
229 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
230 if (e->frequency > CGRAPH_FREQ_MAX)
231 e->frequency = CGRAPH_FREQ_MAX;
232 if (!e->inline_failed)
233 update_noncloned_frequencies (e->callee, freq_scale, nest);
/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping about whether we are actually creating
   new clones or re-using a node originally representing an out-of-line
   function call.  */
242 void
243 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
244 bool update_original)
246 HOST_WIDE_INT peak;
248 if (duplicate)
250 /* We may eliminate the need for out-of-line copy to be output.
251 In that case just go ahead and re-use it. */
252 if (!e->callee->callers->next_caller
253 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
254 /* Don't reuse if more than one function shares a comdat group.
255 If the other function(s) are needed, we need to emit even
256 this function out of line. */
257 && !e->callee->same_comdat_group
258 && !cgraph_new_nodes)
260 gcc_assert (!e->callee->global.inlined_to);
261 if (e->callee->analyzed)
263 overall_size -= e->callee->global.size;
264 nfunctions_inlined++;
266 duplicate = false;
267 e->callee->local.externally_visible = false;
268 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
270 else
272 struct cgraph_node *n;
273 n = cgraph_clone_node (e->callee, e->callee->decl,
274 e->count, e->frequency, e->loop_nest,
275 update_original, NULL);
276 cgraph_redirect_edge_callee (e, n);
280 if (e->caller->global.inlined_to)
281 e->callee->global.inlined_to = e->caller->global.inlined_to;
282 else
283 e->callee->global.inlined_to = e->caller;
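/* Stack-size bookkeeping: the callee's frame is assumed to start where
   the caller's frame ends, and the outermost function we inline into
   keeps the maximum such peak as its estimated stack size.  */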
284 e->callee->global.stack_frame_offset
285 = e->caller->global.stack_frame_offset
286 + inline_summary (e->caller)->estimated_self_stack_size;
287 peak = e->callee->global.stack_frame_offset
288 + inline_summary (e->callee)->estimated_self_stack_size;
289 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
290 e->callee->global.inlined_to->global.estimated_stack_size = peak;
291 cgraph_propagate_frequency (e->callee);
293 /* Recursively clone all bodies. */
294 for (e = e->callee->callees; e; e = e->next_callee)
295 if (!e->inline_failed)
296 cgraph_clone_inlined_nodes (e, duplicate, update_original);
/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  Return true iff any
   new callgraph edges were discovered as a result of inlining.  */
305 static bool
306 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
307 VEC (cgraph_edge_p, heap) **new_edges)
309 int old_size = 0, new_size = 0;
310 struct cgraph_node *to = NULL, *what;
311 struct cgraph_edge *curr = e;
312 int freq;
314 gcc_assert (e->inline_failed);
315 e->inline_failed = CIF_OK;
316 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
318 cgraph_clone_inlined_nodes (e, true, update_original);
320 what = e->callee;
322 freq = e->frequency;
323 /* Now update size of caller and all functions caller is inlined into. */
324 for (;e && !e->inline_failed; e = e->caller->callers)
326 to = e->caller;
327 old_size = e->caller->global.size;
328 new_size = cgraph_estimate_size_after_inlining (1, to, what);
329 to->global.size = new_size;
330 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
332 gcc_assert (what->global.inlined_to == to);
333 if (new_size > old_size)
334 overall_size += new_size - old_size;
335 ncalls_inlined++;
337 if (flag_indirect_inlining)
338 return ipa_propagate_indirect_call_infos (curr, new_edges);
339 else
340 return false;
343 /* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER. */
345 static void
346 cgraph_mark_inline (struct cgraph_edge *edge)
348 struct cgraph_node *to = edge->caller;
349 struct cgraph_node *what = edge->callee;
350 struct cgraph_edge *e, *next;
352 gcc_assert (!edge->call_stmt_cannot_inline_p);
353 /* Look for all calls, mark them inline and clone recursively
354 all inlined functions. */
355 for (e = what->callers; e; e = next)
357 next = e->next_caller;
358 if (e->caller == to && e->inline_failed)
360 cgraph_mark_inline_edge (e, true, NULL);
361 if (e == edge)
362 edge = next;
/* Estimate the growth caused by inlining NODE into all of its callers.  */
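/* The growth is the sum, over all call sites that would be inlined, of
   the caller's size increase, minus NODE's own size when the offline
   copy can go away (NODE is only called directly, is not external and
   is not self-recursive).  The result is cached in estimated_growth.  */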
369 static int
370 cgraph_estimate_growth (struct cgraph_node *node)
372 int growth = 0;
373 struct cgraph_edge *e;
374 bool self_recursive = false;
376 if (node->global.estimated_growth != INT_MIN)
377 return node->global.estimated_growth;
379 for (e = node->callers; e; e = e->next_caller)
381 if (e->caller == node)
382 self_recursive = true;
383 if (e->inline_failed)
384 growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
385 - e->caller->global.size);
/* ??? Wrong for non-trivially self-recursive functions or cases where
   we decide not to inline for different reasons, but it is not a big
   deal as in that case we will keep the body around, but we will also
   avoid some inlining.  */
392 if (cgraph_only_called_directly_p (node)
393 && !DECL_EXTERNAL (node->decl) && !self_recursive)
394 growth -= node->global.size;
396 node->global.estimated_growth = growth;
397 return growth;
/* Return false when inlining WHAT into TO is not a good idea as it would
   cause too large a growth of function bodies.  When ONE_ONLY is true,
   assume that only one call site is going to be inlined; otherwise figure
   out how many call sites in TO call WHAT and verify that all can be
   inlined.  */
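/* The body-growth limit below is based on the larger of the caller's
   and the callee's self size, increased by PARAM_LARGE_FUNCTION_GROWTH
   percent; e.g. with a value of 100 the function may at most double.
   A similar check, driven by PARAM_STACK_FRAME_GROWTH and
   PARAM_LARGE_STACK_FRAME, bounds the growth of the estimated stack
   frame.  */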
407 static bool
408 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
409 cgraph_inline_failed_t *reason, bool one_only)
411 int times = 0;
412 struct cgraph_edge *e;
413 int newsize;
414 int limit;
415 HOST_WIDE_INT stack_size_limit, inlined_stack;
417 if (one_only)
418 times = 1;
419 else
420 for (e = to->callees; e; e = e->next_callee)
421 if (e->callee == what)
422 times++;
424 if (to->global.inlined_to)
425 to = to->global.inlined_to;
427 /* When inlining large function body called once into small function,
428 take the inlined function as base for limiting the growth. */
429 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
430 limit = inline_summary (to)->self_size;
431 else
432 limit = inline_summary (what)->self_size;
434 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
436 /* Check the size after inlining against the function limits. But allow
437 the function to shrink if it went over the limits by forced inlining. */
438 newsize = cgraph_estimate_size_after_inlining (times, to, what);
439 if (newsize >= to->global.size
440 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
441 && newsize > limit)
443 if (reason)
444 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
445 return false;
448 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
450 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
452 inlined_stack = (to->global.stack_frame_offset
453 + inline_summary (to)->estimated_self_stack_size
454 + what->global.estimated_stack_size);
455 if (inlined_stack > stack_size_limit
456 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
458 if (reason)
459 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
460 return false;
462 return true;
465 /* Return true when function N is small enough to be inlined. */
467 static bool
468 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
470 tree decl = n->decl;
472 if (n->local.disregard_inline_limits)
473 return true;
475 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
477 if (reason)
478 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
479 return false;
482 if (!n->analyzed)
484 if (reason)
485 *reason = CIF_BODY_NOT_AVAILABLE;
486 return false;
489 if (DECL_DECLARED_INLINE_P (decl))
491 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
493 if (reason)
494 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
495 return false;
498 else
500 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
502 if (reason)
503 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
504 return false;
508 return true;
/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where the same function appears
   more than once in a single recursion nest path in the inline graph.  */
515 static inline bool
516 cgraph_recursive_inlining_p (struct cgraph_node *to,
517 struct cgraph_node *what,
518 cgraph_inline_failed_t *reason)
520 bool recursive;
521 if (to->global.inlined_to)
522 recursive = what->decl == to->global.inlined_to->decl;
523 else
524 recursive = what->decl == to->decl;
/* Marking a recursive function inline has sane semantics and thus we
   should not warn on it.  */
527 if (recursive && reason)
528 *reason = (what->local.disregard_inline_limits
529 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
530 return recursive;
/* A cost model driving the inlining heuristics such that the edges with
   the smallest badness are inlined first.  After each inlining is
   performed the costs of all caller edges of the affected nodes are
   recomputed so the metric may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.  */
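/* Roughly: edges whose callee disregards the inline limits always win
   (INT_MIN); edges whose inlining shrinks code come next; with profile
   feedback the priority is based on the execution count and time benefit
   divided by growth; with guessed profiles it is the growth scaled down
   by call frequency and time benefit plus the overall growth estimate;
   without any profile it is the overall growth shifted by loop nest.
   Recursive edges get a slightly worse badness so they are considered
   last.  */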
539 static int
540 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
542 gcov_type badness;
543 int growth =
544 (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
545 - edge->caller->global.size);
547 if (edge->callee->local.disregard_inline_limits)
548 return INT_MIN;
550 if (dump)
fprintf (dump_file, " Badness calculation for %s -> %s\n",
553 cgraph_node_name (edge->caller),
554 cgraph_node_name (edge->callee));
555 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
556 growth,
557 edge->callee->global.time,
558 inline_summary (edge->callee)->time_inlining_benefit,
559 edge->callee->global.size,
560 inline_summary (edge->callee)->size_inlining_benefit);
563 /* Always prefer inlining saving code size. */
564 if (growth <= 0)
566 badness = INT_MIN - growth;
567 if (dump)
568 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
569 growth);
/* When profiling is available, base priorities on -(#calls / growth),
   so we optimize for the overall number of "executed" inlined calls.  */
574 else if (max_count)
576 badness =
577 ((int)
578 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
579 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
580 if (dump)
582 fprintf (dump_file,
583 " %i (relative %f): profile info. Relative count %f"
584 " * Relative benefit %f\n",
585 (int) badness, (double) badness / INT_MIN,
586 (double) edge->count / max_count,
587 (double) (inline_summary (edge->callee)->
588 time_inlining_benefit + 1) / (max_benefit + 1));
/* When a function-local profile is available, base priorities on
   growth / frequency, so we optimize for the overall frequency of inlined
   calls.  This is not too accurate since, while the call might be frequent
   within the function, the function itself may be infrequent.

   Another objective to optimize for is the number of different calls
   inlined.  We add the estimated growth after inlining the function into
   all its callers to bias the priorities slightly in this direction (so
   functions of the same size that are called fewer times get priority).  */
601 else if (flag_guess_branch_prob)
603 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
604 int benefitperc;
605 int growth_for_all;
606 badness = growth * 10000;
607 benefitperc =
608 MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
609 (edge->callee->global.time + 1) +1, 100);
610 div *= benefitperc;
613 /* Decrease badness if call is nested. */
614 /* Compress the range so we don't overflow. */
615 if (div > 10000)
616 div = 10000 + ceil_log2 (div) - 8;
617 if (div < 1)
618 div = 1;
619 if (badness > 0)
620 badness /= div;
621 growth_for_all = cgraph_estimate_growth (edge->callee);
622 badness += growth_for_all;
623 if (badness > INT_MAX)
624 badness = INT_MAX;
625 if (dump)
627 fprintf (dump_file,
628 " %i: guessed profile. frequency %i, overall growth %i,"
629 " benefit %i%%, divisor %i\n",
630 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
/* When a function-local profile is not available or it does not give
   useful information (i.e. the frequency is zero), base the cost on
   loop nest and overall size growth, so we optimize for the overall number
   of functions fully inlined in the program.  */
637 else
639 int nest = MIN (edge->loop_nest, 8);
640 badness = cgraph_estimate_growth (edge->callee) * 256;
642 /* Decrease badness if call is nested. */
643 if (badness > 0)
644 badness >>= nest;
645 else
647 badness <<= nest;
649 if (dump)
650 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
651 nest);
654 /* Ensure that we did not overflow in all the fixed point math above. */
655 gcc_assert (badness >= INT_MIN);
656 gcc_assert (badness <= INT_MAX - 1);
/* Make recursive inlining always happen after other inlining is done.  */
658 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
659 return badness + 1;
660 else
661 return badness;
/* Recompute the heap nodes for each caller edge of NODE.  */
666 static void
667 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
668 bitmap updated_nodes)
670 struct cgraph_edge *edge;
671 cgraph_inline_failed_t failed_reason;
673 if (!node->local.inlinable
674 || node->global.inlined_to)
675 return;
676 if (bitmap_bit_p (updated_nodes, node->uid))
677 return;
678 bitmap_set_bit (updated_nodes, node->uid);
679 node->global.estimated_growth = INT_MIN;
681 if (!node->local.inlinable)
682 return;
683 /* See if there is something to do. */
684 for (edge = node->callers; edge; edge = edge->next_caller)
685 if (edge->inline_failed)
686 break;
687 if (!edge)
688 return;
689 /* Prune out edges we won't inline into anymore. */
690 if (!cgraph_default_inline_p (node, &failed_reason))
692 for (; edge; edge = edge->next_caller)
693 if (edge->aux)
695 fibheap_delete_node (heap, (fibnode_t) edge->aux);
696 edge->aux = NULL;
697 if (edge->inline_failed)
698 edge->inline_failed = failed_reason;
700 return;
703 for (; edge; edge = edge->next_caller)
704 if (edge->inline_failed)
706 int badness = cgraph_edge_badness (edge, false);
707 if (edge->aux)
709 fibnode_t n = (fibnode_t) edge->aux;
710 gcc_assert (n->data == edge);
711 if (n->key == badness)
712 continue;
/* fibheap_replace_key only decreases the keys.
   When we increase the key we do not update the heap
   and instead re-insert the element once it becomes
   a minimum of the heap.  */
718 if (badness < n->key)
720 fibheap_replace_key (heap, n, badness);
721 gcc_assert (n->key == badness);
722 continue;
725 else
726 edge->aux = fibheap_insert (heap, badness, edge);
/* Recompute the heap nodes for the caller edges of each callee.
   Walk recursively into all inline clones.  */
733 static void
734 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
735 bitmap updated_nodes)
737 struct cgraph_edge *e = node->callees;
738 node->global.estimated_growth = INT_MIN;
740 if (!e)
741 return;
742 while (true)
743 if (!e->inline_failed && e->callee->callees)
744 e = e->callee->callees;
745 else
747 if (e->inline_failed)
748 update_caller_keys (heap, e->callee, updated_nodes);
749 if (e->next_callee)
750 e = e->next_callee;
751 else
755 if (e->caller == node)
756 return;
757 e = e->caller->callers;
759 while (!e->next_callee);
760 e = e->next_callee;
/* Enqueue all recursive calls from NODE into the priority queue HEAP,
   depending on how much we want to recursively inline the call.  */
768 static void
769 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
770 fibheap_t heap)
772 static int priority;
773 struct cgraph_edge *e;
774 for (e = where->callees; e; e = e->next_callee)
775 if (e->callee == node)
/* When profile feedback is available, prioritize by the expected number
   of calls.  Without profile feedback we maintain a simple queue
   to order candidates by recursion depth.  */
780 fibheap_insert (heap,
781 !max_count ? priority++
: -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
		e);
785 for (e = where->callees; e; e = e->next_callee)
786 if (!e->inline_failed)
787 lookup_recursive_calls (node, e->callee, heap);
/* Decide on recursive inlining: in case the function has recursive calls,
   inline until the body size reaches the given limit.  If any new indirect
   edges are discovered in the process, add them to *NEW_EDGES, unless
   NEW_EDGES is NULL.  */
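/* The strategy below: make a master clone of NODE to inline from, put
   all recursive call sites into a priority queue (by expected call count
   when profile data is available, otherwise by recursion depth) and keep
   inlining the best candidate until either the size limit
   (PARAM_MAX_INLINE_INSNS_RECURSIVE[_AUTO]) or the depth limit
   (PARAM_MAX_INLINE_RECURSIVE_DEPTH[_AUTO]) is reached.  */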
795 static bool
796 cgraph_decide_recursive_inlining (struct cgraph_node *node,
797 VEC (cgraph_edge_p, heap) **new_edges)
799 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
800 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
801 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
802 fibheap_t heap;
803 struct cgraph_edge *e;
804 struct cgraph_node *master_clone, *next;
805 int depth = 0;
806 int n = 0;
808 /* It does not make sense to recursively inline always-inline functions
809 as we are going to sorry() on the remaining calls anyway. */
810 if (node->local.disregard_inline_limits
811 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
812 return false;
814 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
815 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
816 return false;
818 if (DECL_DECLARED_INLINE_P (node->decl))
820 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
821 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
824 /* Make sure that function is small enough to be considered for inlining. */
825 if (!max_depth
826 || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
827 return false;
828 heap = fibheap_new ();
829 lookup_recursive_calls (node, node, heap);
830 if (fibheap_empty (heap))
832 fibheap_delete (heap);
833 return false;
836 if (dump_file)
837 fprintf (dump_file,
838 " Performing recursive inlining on %s\n",
839 cgraph_node_name (node));
841 /* We need original clone to copy around. */
842 master_clone = cgraph_clone_node (node, node->decl,
843 node->count, CGRAPH_FREQ_BASE, 1,
844 false, NULL);
845 master_clone->needed = true;
846 for (e = master_clone->callees; e; e = e->next_callee)
847 if (!e->inline_failed)
848 cgraph_clone_inlined_nodes (e, true, false);
/* Do the inlining and update the list of recursive calls during the process.  */
851 while (!fibheap_empty (heap)
852 && (cgraph_estimate_size_after_inlining (1, node, master_clone)
853 <= limit))
855 struct cgraph_edge *curr
856 = (struct cgraph_edge *) fibheap_extract_min (heap);
857 struct cgraph_node *cnode;
859 depth = 1;
860 for (cnode = curr->caller;
861 cnode->global.inlined_to; cnode = cnode->callers->caller)
862 if (node->decl == curr->callee->decl)
863 depth++;
864 if (depth > max_depth)
866 if (dump_file)
867 fprintf (dump_file,
868 " maximal depth reached\n");
869 continue;
872 if (max_count)
874 if (!cgraph_maybe_hot_edge_p (curr))
876 if (dump_file)
877 fprintf (dump_file, " Not inlining cold call\n");
878 continue;
880 if (curr->count * 100 / node->count < probability)
882 if (dump_file)
883 fprintf (dump_file,
884 " Probability of edge is too small\n");
885 continue;
889 if (dump_file)
891 fprintf (dump_file,
892 " Inlining call of depth %i", depth);
893 if (node->count)
895 fprintf (dump_file, " called approx. %.2f times per call",
896 (double)curr->count / node->count);
898 fprintf (dump_file, "\n");
900 cgraph_redirect_edge_callee (curr, master_clone);
901 cgraph_mark_inline_edge (curr, false, new_edges);
902 lookup_recursive_calls (node, curr->callee, heap);
903 n++;
905 if (!fibheap_empty (heap) && dump_file)
906 fprintf (dump_file, " Recursive inlining growth limit met.\n");
908 fibheap_delete (heap);
909 if (dump_file)
910 fprintf (dump_file,
911 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
912 master_clone->global.size, node->global.size,
913 master_clone->global.time, node->global.time);
/* Remove the master clone we used for inlining.  We rely on the fact that
   clones inlined into the master clone get queued just before the master
   clone, so we don't need recursion.  */
918 for (node = cgraph_nodes; node != master_clone;
919 node = next)
921 next = node->next;
922 if (node->global.inlined_to == master_clone)
923 cgraph_remove_node (node);
925 cgraph_remove_node (master_clone);
926 /* FIXME: Recursive inlining actually reduces number of calls of the
927 function. At this place we should probably walk the function and
928 inline clones and compensate the counts accordingly. This probably
929 doesn't matter much in practice. */
930 return n > 0;
933 /* Set inline_failed for all callers of given function to REASON. */
935 static void
936 cgraph_set_inline_failed (struct cgraph_node *node,
937 cgraph_inline_failed_t reason)
939 struct cgraph_edge *e;
941 if (dump_file)
942 fprintf (dump_file, "Inlining failed: %s\n",
943 cgraph_inline_failed_string (reason));
944 for (e = node->callers; e; e = e->next_caller)
945 if (e->inline_failed)
946 e->inline_failed = reason;
949 /* Given whole compilation unit estimate of INSNS, compute how large we can
950 allow the unit to grow. */
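/* For example, with PARAM_INLINE_UNIT_GROWTH set to 30 a unit estimated
   at 100000 instructions may grow up to 130000, while very small units
   are first rounded up to PARAM_LARGE_UNIT_INSNS before the percentage
   is applied.  */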
951 static int
952 compute_max_insns (int insns)
954 int max_insns = insns;
955 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
956 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
958 return ((HOST_WIDEST_INT) max_insns
959 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
962 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
963 static void
964 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
966 while (VEC_length (cgraph_edge_p, new_edges) > 0)
968 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
970 gcc_assert (!edge->aux);
971 if (edge->callee->local.inlinable
972 && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
973 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and then the
   estimates are updated after each inlining.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */
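/* Outline of the loop below: every inlinable edge is inserted into a
   fibonacci heap keyed by its badness.  We repeatedly extract the
   minimum, recompute its badness (keys are updated lazily, so a stale
   entry is simply re-inserted with its current value), check the
   per-function and per-unit growth limits, perform the inlining and then
   update the keys of the callers and callees whose local properties
   changed.  The loop stops once the unit grows past compute_max_insns
   or the heap becomes empty.  */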
985 static void
986 cgraph_decide_inlining_of_small_functions (void)
988 struct cgraph_node *node;
989 struct cgraph_edge *edge;
990 cgraph_inline_failed_t failed_reason;
991 fibheap_t heap = fibheap_new ();
992 bitmap updated_nodes = BITMAP_ALLOC (NULL);
993 int min_size, max_size;
994 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
996 if (flag_indirect_inlining)
997 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
999 if (dump_file)
1000 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1002 /* Put all inline candidates into the heap. */
1004 for (node = cgraph_nodes; node; node = node->next)
1006 if (!node->local.inlinable || !node->callers)
1007 continue;
1008 if (dump_file)
1009 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1011 node->global.estimated_growth = INT_MIN;
1012 if (!cgraph_default_inline_p (node, &failed_reason))
1014 cgraph_set_inline_failed (node, failed_reason);
1015 continue;
1018 for (edge = node->callers; edge; edge = edge->next_caller)
1019 if (edge->inline_failed)
1021 gcc_assert (!edge->aux);
1022 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1026 max_size = compute_max_insns (overall_size);
1027 min_size = overall_size;
1029 while (overall_size <= max_size
1030 && !fibheap_empty (heap))
1032 int old_size = overall_size;
1033 struct cgraph_node *where, *callee;
1034 int badness = fibheap_min_key (heap);
1035 int current_badness;
1036 int growth;
1037 cgraph_inline_failed_t not_good = CIF_OK;
1039 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1040 gcc_assert (edge->aux);
1041 edge->aux = NULL;
1042 if (!edge->inline_failed)
1043 continue;
/* When updating the edge costs, we only decrease badness in the keys.
   When the badness increases, we keep the heap as it is and re-insert
   the key now.  */
1048 current_badness = cgraph_edge_badness (edge, false);
1049 gcc_assert (current_badness >= badness);
1050 if (current_badness != badness)
1052 edge->aux = fibheap_insert (heap, current_badness, edge);
1053 continue;
1056 callee = edge->callee;
1058 growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
1059 - edge->caller->global.size);
1061 if (dump_file)
1063 fprintf (dump_file,
1064 "\nConsidering %s with %i size\n",
1065 cgraph_node_name (edge->callee),
1066 edge->callee->global.size);
1067 fprintf (dump_file,
1068 " to be inlined into %s in %s:%i\n"
1069 " Estimated growth after inlined into all callees is %+i insns.\n"
1070 " Estimated badness is %i, frequency %.2f.\n",
1071 cgraph_node_name (edge->caller),
1072 flag_wpa ? "unknown"
1073 : gimple_filename ((const_gimple) edge->call_stmt),
1074 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1075 cgraph_estimate_growth (edge->callee),
1076 badness,
1077 edge->frequency / (double)CGRAPH_FREQ_BASE);
1078 if (edge->count)
1079 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1080 if (dump_flags & TDF_DETAILS)
1081 cgraph_edge_badness (edge, true);
/* When profile info is not available we do not weight in any way the
   position of the call in the procedure itself.  This means that if a call
   of function A from function B seems profitable to inline, the recursive
   call of function A in the inline copy of A in B will look profitable too,
   and we end up inlining until reaching the maximal function growth.  This
   is not a good idea, so prohibit the recursive inlining.

   ??? When the frequencies are taken into account we might not need this
   restriction.

   We need to be careful here: in some testcases, e.g. directives.c in
   libcpp, we can estimate a self-recursive function to have negative
   growth when inlined completely.  */
1098 if (!edge->count)
1100 where = edge->caller;
1101 while (where->global.inlined_to)
1103 if (where->decl == edge->callee->decl)
1104 break;
1105 where = where->callers->caller;
1107 if (where->global.inlined_to)
1109 edge->inline_failed
1110 = (edge->callee->local.disregard_inline_limits
1111 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1112 if (dump_file)
1113 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1114 continue;
1118 if (edge->callee->local.disregard_inline_limits)
1120 else if (!cgraph_maybe_hot_edge_p (edge))
1121 not_good = CIF_UNLIKELY_CALL;
1122 else if (!flag_inline_functions
1123 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1124 not_good = CIF_NOT_DECLARED_INLINED;
1125 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1126 not_good = CIF_OPTIMIZING_FOR_SIZE;
1127 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1129 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1130 &edge->inline_failed))
1132 edge->inline_failed = not_good;
1133 if (dump_file)
1134 fprintf (dump_file, " inline_failed:%s.\n",
1135 cgraph_inline_failed_string (edge->inline_failed));
1137 continue;
1139 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1141 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1142 &edge->inline_failed))
1144 if (dump_file)
1145 fprintf (dump_file, " inline_failed:%s.\n",
1146 cgraph_inline_failed_string (edge->inline_failed));
1148 continue;
1150 if (!tree_can_inline_p (edge))
1152 if (dump_file)
1153 fprintf (dump_file, " inline_failed:%s.\n",
1154 cgraph_inline_failed_string (edge->inline_failed));
1155 continue;
1157 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1158 &edge->inline_failed))
1160 where = edge->caller;
1161 if (where->global.inlined_to)
1162 where = where->global.inlined_to;
1163 if (!cgraph_decide_recursive_inlining (where,
1164 flag_indirect_inlining
1165 ? &new_indirect_edges : NULL))
1166 continue;
1167 if (flag_indirect_inlining)
1168 add_new_edges_to_heap (heap, new_indirect_edges);
1169 update_callee_keys (heap, where, updated_nodes);
1171 else
1173 struct cgraph_node *callee;
1174 if (edge->call_stmt_cannot_inline_p
1175 || !cgraph_check_inline_limits (edge->caller, edge->callee,
1176 &edge->inline_failed, true))
1178 if (dump_file)
1179 fprintf (dump_file, " Not inlining into %s:%s.\n",
1180 cgraph_node_name (edge->caller),
1181 cgraph_inline_failed_string (edge->inline_failed));
1182 continue;
1184 callee = edge->callee;
1185 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1186 if (flag_indirect_inlining)
1187 add_new_edges_to_heap (heap, new_indirect_edges);
1189 update_callee_keys (heap, callee, updated_nodes);
1191 where = edge->caller;
1192 if (where->global.inlined_to)
1193 where = where->global.inlined_to;
/* Our profitability metric can depend on local properties
   such as the number of inlinable calls and the size of the function body.
   After inlining these properties might change for the function we
   inlined into (since its body size changed) and for the functions
   called by the function we inlined (since the number of their inlinable
   callers might change).  */
1201 update_caller_keys (heap, where, updated_nodes);
1203 /* We removed one call of the function we just inlined. If offline
1204 copy is still needed, be sure to update the keys. */
1205 if (callee != where && !callee->global.inlined_to)
1206 update_caller_keys (heap, callee, updated_nodes);
1207 bitmap_clear (updated_nodes);
1209 if (dump_file)
1211 fprintf (dump_file,
1212 " Inlined into %s which now has size %i and self time %i,"
1213 "net change of %+i.\n",
1214 cgraph_node_name (edge->caller),
1215 edge->caller->global.time,
1216 edge->caller->global.size,
1217 overall_size - old_size);
1219 if (min_size > overall_size)
1221 min_size = overall_size;
1222 max_size = compute_max_insns (min_size);
1224 if (dump_file)
1225 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1228 while (!fibheap_empty (heap))
1230 int badness = fibheap_min_key (heap);
1232 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1233 gcc_assert (edge->aux);
1234 edge->aux = NULL;
1235 if (!edge->inline_failed)
1236 continue;
1237 #ifdef ENABLE_CHECKING
1238 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1239 #endif
1240 if (dump_file)
1242 fprintf (dump_file,
1243 "\nSkipping %s with %i size\n",
1244 cgraph_node_name (edge->callee),
1245 edge->callee->global.size);
1246 fprintf (dump_file,
1247 " called by %s in %s:%i\n"
1248 " Estimated growth after inlined into all callees is %+i insns.\n"
1249 " Estimated badness is %i, frequency %.2f.\n",
1250 cgraph_node_name (edge->caller),
1251 flag_wpa ? "unknown"
1252 : gimple_filename ((const_gimple) edge->call_stmt),
1253 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1254 cgraph_estimate_growth (edge->callee),
1255 badness,
1256 edge->frequency / (double)CGRAPH_FREQ_BASE);
1257 if (edge->count)
1258 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1259 if (dump_flags & TDF_DETAILS)
1260 cgraph_edge_badness (edge, true);
1262 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1263 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1264 &edge->inline_failed))
1265 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1268 if (new_indirect_edges)
1269 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1270 fibheap_delete (heap);
1271 BITMAP_FREE (updated_nodes);
1274 /* Flatten NODE from the IPA inliner. */
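/* Flattening inlines every call in NODE's body, and transitively in the
   bodies brought in by that inlining, giving up only on calls that
   cannot be inlined, on unavailable bodies and on cycles, which would
   otherwise lead to infinite recursion.  */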
1276 static void
1277 cgraph_flatten (struct cgraph_node *node)
1279 struct cgraph_edge *e;
1281 /* We shouldn't be called recursively when we are being processed. */
1282 gcc_assert (node->aux == NULL);
1284 node->aux = (void *)(size_t) INLINE_ALL;
1286 for (e = node->callees; e; e = e->next_callee)
1288 struct cgraph_node *orig_callee;
1290 if (e->call_stmt_cannot_inline_p)
1291 continue;
1293 if (!e->callee->analyzed)
1295 if (dump_file)
1296 fprintf (dump_file,
1297 "Not inlining: Function body not available.\n");
1298 continue;
1301 /* We've hit cycle? It is time to give up. */
1302 if (e->callee->aux)
1304 if (dump_file)
1305 fprintf (dump_file,
1306 "Not inlining %s into %s to avoid cycle.\n",
1307 cgraph_node_name (e->callee),
1308 cgraph_node_name (e->caller));
1309 e->inline_failed = CIF_RECURSIVE_INLINING;
1310 continue;
1313 /* When the edge is already inlined, we just need to recurse into
1314 it in order to fully flatten the leaves. */
1315 if (!e->inline_failed)
1317 cgraph_flatten (e->callee);
1318 continue;
1321 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1323 if (dump_file)
1324 fprintf (dump_file, "Not inlining: recursive call.\n");
1325 continue;
1328 if (!tree_can_inline_p (e))
1330 if (dump_file)
1331 fprintf (dump_file, "Not inlining: %s",
1332 cgraph_inline_failed_string (e->inline_failed));
1333 continue;
1336 /* Inline the edge and flatten the inline clone. Avoid
1337 recursing through the original node if the node was cloned. */
1338 if (dump_file)
1339 fprintf (dump_file, " Inlining %s into %s.\n",
1340 cgraph_node_name (e->callee),
1341 cgraph_node_name (e->caller));
1342 orig_callee = e->callee;
1343 cgraph_mark_inline_edge (e, true, NULL);
1344 if (e->callee != orig_callee)
1345 orig_callee->aux = (void *)(size_t) INLINE_ALL;
1346 cgraph_flatten (e->callee);
1347 if (e->callee != orig_callee)
1348 orig_callee->aux = NULL;
1351 node->aux = NULL;
/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */
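/* The pass below proceeds in three stages: first flatten the functions
   carrying the "flatten" attribute, then run the greedy heuristic for
   small functions, and finally, when flag_inline_functions_called_once
   is set, inline functions that have a single direct caller and whose
   offline copy can be discarded.  */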
1357 static unsigned int
1358 cgraph_decide_inlining (void)
1360 struct cgraph_node *node;
1361 int nnodes;
1362 struct cgraph_node **order =
1363 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1364 int old_size = 0;
1365 int i;
1366 int initial_size = 0;
1368 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1369 if (in_lto_p && flag_indirect_inlining)
1370 ipa_update_after_lto_read ();
1371 if (flag_indirect_inlining)
1372 ipa_create_all_structures_for_iinln ();
1374 max_count = 0;
1375 max_benefit = 0;
1376 for (node = cgraph_nodes; node; node = node->next)
1377 if (node->analyzed)
1379 struct cgraph_edge *e;
1381 gcc_assert (inline_summary (node)->self_size == node->global.size);
1382 initial_size += node->global.size;
1383 for (e = node->callees; e; e = e->next_callee)
1384 if (max_count < e->count)
1385 max_count = e->count;
1386 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1387 max_benefit = inline_summary (node)->time_inlining_benefit;
1389 gcc_assert (in_lto_p
1390 || !max_count
1391 || (profile_info && flag_branch_probabilities));
1392 overall_size = initial_size;
1394 nnodes = cgraph_postorder (order);
1396 if (dump_file)
1397 fprintf (dump_file,
1398 "\nDeciding on inlining. Starting with size %i.\n",
1399 initial_size);
1401 for (node = cgraph_nodes; node; node = node->next)
1402 node->aux = 0;
1404 if (dump_file)
1405 fprintf (dump_file, "\nFlattening functions:\n");
1407 /* In the first pass handle functions to be flattened. Do this with
1408 a priority so none of our later choices will make this impossible. */
1409 for (i = nnodes - 1; i >= 0; i--)
1411 node = order[i];
/* Handle nodes to be flattened, but don't update the overall unit
   size.  Calling the incremental inliner here is lame; a simple
   worklist should be enough.  What should be left here from the early
   inliner (if it runs) is the cyclic cases.  Ideally when processing
   callees we would stop inlining at the entry of cycles, possibly
   cloning that entry point and trying to flatten it, turning it into
   a self-recursive function.  */
1421 if (lookup_attribute ("flatten",
1422 DECL_ATTRIBUTES (node->decl)) != NULL)
1424 if (dump_file)
1425 fprintf (dump_file,
1426 "Flattening %s\n", cgraph_node_name (node));
1427 cgraph_flatten (node);
1431 cgraph_decide_inlining_of_small_functions ();
1433 if (flag_inline_functions_called_once)
1435 if (dump_file)
1436 fprintf (dump_file, "\nDeciding on functions called once:\n");
1438 /* And finally decide what functions are called once. */
1439 for (i = nnodes - 1; i >= 0; i--)
1441 node = order[i];
1443 if (node->callers
1444 && !node->callers->next_caller
1445 && cgraph_only_called_directly_p (node)
1446 && node->local.inlinable
1447 && node->callers->inline_failed
1448 && node->callers->caller != node
1449 && node->callers->caller->global.inlined_to != node
1450 && !node->callers->call_stmt_cannot_inline_p
1451 && !DECL_EXTERNAL (node->decl)
1452 && !DECL_COMDAT (node->decl))
1454 cgraph_inline_failed_t reason;
1455 old_size = overall_size;
1456 if (dump_file)
1458 fprintf (dump_file,
1459 "\nConsidering %s size %i.\n",
1460 cgraph_node_name (node), node->global.size);
1461 fprintf (dump_file,
1462 " Called once from %s %i insns.\n",
1463 cgraph_node_name (node->callers->caller),
1464 node->callers->caller->global.size);
1467 if (cgraph_check_inline_limits (node->callers->caller, node,
1468 &reason, false))
1470 struct cgraph_node *caller = node->callers->caller;
1471 cgraph_mark_inline (node->callers);
1472 if (dump_file)
1473 fprintf (dump_file,
1474 " Inlined into %s which now has %i size"
1475 " for a net change of %+i size.\n",
1476 cgraph_node_name (caller),
1477 caller->global.size,
1478 overall_size - old_size);
1480 else
1482 if (dump_file)
1483 fprintf (dump_file,
1484 " Not inlining: %s.\n",
1485 cgraph_inline_failed_string (reason));
1491 /* Free ipa-prop structures if they are no longer needed. */
1492 if (flag_indirect_inlining)
1493 ipa_free_all_structures_after_iinln ();
1495 if (dump_file)
1496 fprintf (dump_file,
1497 "\nInlined %i calls, eliminated %i functions, "
1498 "size %i turned to %i size.\n\n",
1499 ncalls_inlined, nfunctions_inlined, initial_size,
1500 overall_size);
1501 free (order);
1502 return 0;
1505 /* Return true when N is leaf function. Accept cheap (pure&const) builtins
1506 in leaf functions. */
1507 static bool
1508 leaf_node_p (struct cgraph_node *n)
1510 struct cgraph_edge *e;
1511 for (e = n->callees; e; e = e->next_callee)
1512 if (!DECL_BUILT_IN (e->callee->decl)
1513 || (!TREE_READONLY (e->callee->decl)
1514 || DECL_PURE_P (e->callee->decl)))
1515 return false;
1516 return true;
/* Incrementally decide on inlining calls directly in NODE, according to
   MODE.  */
1522 static bool
1523 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1524 enum inlining_mode mode)
1526 struct cgraph_edge *e;
1527 bool inlined = false;
1528 cgraph_inline_failed_t failed_reason;
1530 #ifdef ENABLE_CHECKING
1531 verify_cgraph_node (node);
1532 #endif
1534 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1535 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1537 if (dump_file)
1538 fprintf (dump_file, "Incrementally flattening %s\n",
1539 cgraph_node_name (node));
1540 mode = INLINE_ALL;
1543 /* First of all look for always inline functions. */
1544 if (mode != INLINE_SIZE_NORECURSIVE)
1545 for (e = node->callees; e; e = e->next_callee)
1547 if (!e->callee->local.disregard_inline_limits
1548 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1549 continue;
1550 if (e->call_stmt_cannot_inline_p)
1551 continue;
1552 if (dump_file)
1553 fprintf (dump_file,
1554 "Considering to always inline inline candidate %s.\n",
1555 cgraph_node_name (e->callee));
1556 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1558 if (dump_file)
1559 fprintf (dump_file, "Not inlining: recursive call.\n");
1560 continue;
1562 if (!tree_can_inline_p (e))
1564 if (dump_file)
1565 fprintf (dump_file,
1566 "Not inlining: %s",
1567 cgraph_inline_failed_string (e->inline_failed));
1568 continue;
1570 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1571 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1573 if (dump_file)
1574 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1575 continue;
1577 if (!e->callee->analyzed)
1579 if (dump_file)
1580 fprintf (dump_file,
1581 "Not inlining: Function body no longer available.\n");
1582 continue;
1585 if (dump_file)
1586 fprintf (dump_file, " Inlining %s into %s.\n",
1587 cgraph_node_name (e->callee),
1588 cgraph_node_name (e->caller));
1589 cgraph_mark_inline (e);
1590 inlined = true;
1593 /* Now do the automatic inlining. */
1594 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1595 /* Never inline regular functions into always-inline functions
1596 during incremental inlining. */
1597 && !node->local.disregard_inline_limits)
1599 bitmap visited = BITMAP_ALLOC (NULL);
1600 for (e = node->callees; e; e = e->next_callee)
1602 int allowed_growth = 0;
1603 if (!e->callee->local.inlinable
1604 || !e->inline_failed
1605 || e->callee->local.disregard_inline_limits)
1606 continue;
1607 /* We are inlining a function to all call-sites in node
1608 or to none. So visit each candidate only once. */
1609 if (!bitmap_set_bit (visited, e->callee->uid))
1610 continue;
1611 if (dump_file)
1612 fprintf (dump_file, "Considering inline candidate %s.\n",
1613 cgraph_node_name (e->callee));
1614 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1616 if (dump_file)
1617 fprintf (dump_file, "Not inlining: recursive call.\n");
1618 continue;
1620 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1621 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1623 if (dump_file)
1624 fprintf (dump_file,
1625 "Not inlining: SSA form does not match.\n");
1626 continue;
1629 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1630 && optimize_function_for_speed_p (cfun))
1631 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1633 /* When the function body would grow and inlining the function
1634 won't eliminate the need for offline copy of the function,
1635 don't inline. */
1636 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1637 || (!flag_inline_functions
1638 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1639 && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
1640 > e->caller->global.size + allowed_growth)
1641 && cgraph_estimate_growth (e->callee) > allowed_growth)
1643 if (dump_file)
1644 fprintf (dump_file,
1645 "Not inlining: code size would grow by %i.\n",
1646 cgraph_estimate_size_after_inlining (1, e->caller,
1647 e->callee)
1648 - e->caller->global.size);
1649 continue;
1651 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
1652 false)
1653 || e->call_stmt_cannot_inline_p)
1655 if (dump_file)
1656 fprintf (dump_file, "Not inlining: %s.\n",
1657 cgraph_inline_failed_string (e->inline_failed));
1658 continue;
1660 if (!e->callee->analyzed)
1662 if (dump_file)
1663 fprintf (dump_file,
1664 "Not inlining: Function body no longer available.\n");
1665 continue;
1667 if (!tree_can_inline_p (e))
1669 if (dump_file)
1670 fprintf (dump_file,
1671 "Not inlining: %s.",
1672 cgraph_inline_failed_string (e->inline_failed));
1673 continue;
1675 if (cgraph_default_inline_p (e->callee, &failed_reason))
1677 if (dump_file)
1678 fprintf (dump_file, " Inlining %s into %s.\n",
1679 cgraph_node_name (e->callee),
1680 cgraph_node_name (e->caller));
1681 cgraph_mark_inline (e);
1682 inlined = true;
1685 BITMAP_FREE (visited);
1687 return inlined;
/* Because inlining might remove no-longer-reachable nodes, we need to
   keep the array visible to the garbage collector to avoid reading
   collected-out nodes.  */
1693 static int nnodes;
1694 static GTY ((length ("nnodes"))) struct cgraph_node **order;
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */
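/* Below, when optimizing and early inlining is enabled, the function is
   either flattened (if it carries the "flatten" attribute) or the
   incremental inliner is iterated up to
   PARAM_EARLY_INLINER_MAX_ITERATIONS times, re-running
   optimize_inline_calls after each round so that indirect calls exposed
   by one round can be inlined in the next.  */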
1699 static unsigned int
1700 cgraph_early_inlining (void)
1702 struct cgraph_node *node = cgraph_node (current_function_decl);
1703 unsigned int todo = 0;
1704 int iterations = 0;
1706 if (seen_error ())
1707 return 0;
1709 if (!optimize
1710 || flag_no_inline
1711 || !flag_early_inlining)
/* When not optimizing or not inlining, inline only always-inline
   functions.  */
1715 cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
1716 timevar_push (TV_INTEGRATION);
1717 todo |= optimize_inline_calls (current_function_decl);
1718 timevar_pop (TV_INTEGRATION);
1720 else
1722 if (lookup_attribute ("flatten",
1723 DECL_ATTRIBUTES (node->decl)) != NULL)
1725 if (dump_file)
1726 fprintf (dump_file,
1727 "Flattening %s\n", cgraph_node_name (node));
1728 cgraph_flatten (node);
1729 timevar_push (TV_INTEGRATION);
1730 todo |= optimize_inline_calls (current_function_decl);
1731 timevar_pop (TV_INTEGRATION);
1733 /* We iterate incremental inlining to get trivial cases of indirect
1734 inlining. */
1735 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1736 && cgraph_decide_inlining_incrementally (node,
1737 iterations
1738 ? INLINE_SIZE_NORECURSIVE
1739 : INLINE_SIZE))
1741 timevar_push (TV_INTEGRATION);
1742 todo |= optimize_inline_calls (current_function_decl);
1743 iterations++;
1744 timevar_pop (TV_INTEGRATION);
1746 if (dump_file)
1747 fprintf (dump_file, "Iterations: %i\n", iterations);
1750 cfun->always_inline_functions_inlined = true;
1752 return todo;
1755 struct gimple_opt_pass pass_early_inline =
1758 GIMPLE_PASS,
1759 "einline", /* name */
1760 NULL, /* gate */
1761 cgraph_early_inlining, /* execute */
1762 NULL, /* sub */
1763 NULL, /* next */
1764 0, /* static_pass_number */
1765 TV_INLINE_HEURISTICS, /* tv_id */
1766 0, /* properties_required */
1767 0, /* properties_provided */
1768 0, /* properties_destroyed */
1769 0, /* todo_flags_start */
1770 TODO_dump_func /* todo_flags_finish */
1774 /* When inlining shall be performed. */
1775 static bool
1776 cgraph_gate_ipa_early_inlining (void)
1778 return (flag_early_inlining
1779 && !in_lto_p
1780 && (flag_branch_probabilities || flag_test_coverage
1781 || profile_arc_flag));
/* IPA pass wrapper for the early inlining pass.  We need to run early
   inlining before tree profiling, so we have a stand-alone IPA pass for
   doing so.  */
1786 struct simple_ipa_opt_pass pass_ipa_early_inline =
1789 SIMPLE_IPA_PASS,
1790 "einline_ipa", /* name */
1791 cgraph_gate_ipa_early_inlining, /* gate */
1792 NULL, /* execute */
1793 NULL, /* sub */
1794 NULL, /* next */
1795 0, /* static_pass_number */
1796 TV_INLINE_HEURISTICS, /* tv_id */
1797 0, /* properties_required */
1798 0, /* properties_provided */
1799 0, /* properties_destroyed */
1800 0, /* todo_flags_start */
1801 TODO_dump_cgraph /* todo_flags_finish */
/* See if the statement might disappear after inlining.  We are not terribly
   sophisticated; basically we look for simple abstraction penalty wrappers.  */
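/* For instance, in a small C++ accessor the return statement and the
   loads through the "this" parameter are typically counted as likely
   eliminated, since after inlining they usually reduce to plain copies
   that later passes remove.  */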
static bool
likely_eliminated_by_inlining_p (gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  switch (code)
    {
    case GIMPLE_RETURN:
      return true;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
	return false;

      /* Casts of parameters, loads from parameters passed by reference
	 and stores to return value or parameters are probably free after
	 inlining.  */
      if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
	  || gimple_assign_rhs_code (stmt) == NOP_EXPR
	  || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  tree inner_rhs = rhs;
	  tree inner_lhs = lhs;
	  bool rhs_free = false;
	  bool lhs_free = false;

	  while (handled_component_p (inner_lhs)
		 || TREE_CODE (inner_lhs) == MEM_REF)
	    inner_lhs = TREE_OPERAND (inner_lhs, 0);
	  while (handled_component_p (inner_rhs)
		 || TREE_CODE (inner_rhs) == ADDR_EXPR
		 || TREE_CODE (inner_rhs) == MEM_REF)
	    inner_rhs = TREE_OPERAND (inner_rhs, 0);

	  if (TREE_CODE (inner_rhs) == PARM_DECL
	      || (TREE_CODE (inner_rhs) == SSA_NAME
		  && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
		  && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
	    rhs_free = true;
	  if (rhs_free && is_gimple_reg (lhs))
	    lhs_free = true;
	  if (((TREE_CODE (inner_lhs) == PARM_DECL
		|| (TREE_CODE (inner_lhs) == SSA_NAME
		    && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
		    && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
	       && inner_lhs != lhs)
	      || TREE_CODE (inner_lhs) == RESULT_DECL
	      || (TREE_CODE (inner_lhs) == SSA_NAME
		  && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
	    lhs_free = true;
	  if (lhs_free
	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	    rhs_free = true;
	  if (lhs_free && rhs_free)
	    return true;
	}
      return false;
    default:
      return false;
    }
}
/* Compute function body size parameters for NODE.  */
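/* A rough sketch of the arithmetic below (the numbers are assumed, not
   measured): statement sizes and times come from estimate_num_insns; times
   are additionally weighted by the basic block frequency.  Assuming
   CGRAPH_FREQ_BASE is 1000, a statement of estimated time 2 in a block with
   frequency 500 contributes 2 * 500 = 1000 to the running total, i.e. one
   time unit after the final rounding division by CGRAPH_FREQ_BASE.  */
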
static void
estimate_function_body_sizes (struct cgraph_node *node)
{
  gcov_type time = 0;
  gcov_type time_inlining_benefit = 0;
  int size = 0;
  int size_inlining_benefit = 0;
  basic_block bb;
  gimple_stmt_iterator bsi;
  struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
  tree arg;
  int freq;
  tree funtype = TREE_TYPE (node->decl);

  if (dump_file)
    fprintf (dump_file, "Analyzing function body size: %s\n",
	     cgraph_node_name (node));

  gcc_assert (my_function && my_function->cfg);
  FOR_EACH_BB_FN (bb, my_function)
    {
      freq = compute_call_stmt_bb_frequency (node->decl, bb);
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple stmt = gsi_stmt (bsi);
	  int this_size = estimate_num_insns (stmt, &eni_size_weights);
	  int this_time = estimate_num_insns (stmt, &eni_time_weights);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  freq:%6i size:%3i time:%3i ",
		       freq, this_size, this_time);
	      print_gimple_stmt (dump_file, stmt, 0, 0);
	    }
	  this_time *= freq;
	  time += this_time;
	  size += this_size;
	  if (likely_eliminated_by_inlining_p (stmt))
	    {
	      size_inlining_benefit += this_size;
	      time_inlining_benefit += this_time;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "    Likely eliminated\n");
	    }
	  gcc_assert (time >= 0);
	  gcc_assert (size >= 0);
	}
    }
  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
			   / CGRAPH_FREQ_BASE);
  if (dump_file)
    fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
	     (int) time, (int) time_inlining_benefit,
	     size, size_inlining_benefit);
  time_inlining_benefit += eni_time_weights.call_cost;
  size_inlining_benefit += eni_size_weights.call_cost;
  if (!VOID_TYPE_P (TREE_TYPE (funtype)))
    {
      int cost = estimate_move_cost (TREE_TYPE (funtype));
      time_inlining_benefit += cost;
      size_inlining_benefit += cost;
    }
  for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
    if (!VOID_TYPE_P (TREE_TYPE (arg)))
      {
	int cost = estimate_move_cost (TREE_TYPE (arg));
	time_inlining_benefit += cost;
	size_inlining_benefit += cost;
      }
  if (time_inlining_benefit > MAX_TIME)
    time_inlining_benefit = MAX_TIME;
  if (time > MAX_TIME)
    time = MAX_TIME;
  inline_summary (node)->self_time = time;
  inline_summary (node)->self_size = size;
  if (dump_file)
    fprintf (dump_file,
	     "With function call overhead time: %i-%i size: %i-%i\n",
	     (int) time, (int) time_inlining_benefit,
	     size, size_inlining_benefit);
  inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
  inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
}
/* Compute parameters of functions used by the inliner.  */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
{
  HOST_WIDE_INT self_stack_size;

  gcc_assert (!node->global.inlined_to);

  /* Estimate the stack size for the function.  But not at -O0
     because estimated_stack_frame_size is a quadratic problem.  */
  self_stack_size = optimize ? estimated_stack_frame_size () : 0;
  inline_summary (node)->estimated_self_stack_size = self_stack_size;
  node->global.estimated_stack_size = self_stack_size;
  node->global.stack_frame_offset = 0;

  /* Can this function be inlined at all?  */
  node->local.inlinable = tree_inlinable_function_p (node->decl);
  if (node->local.inlinable && !node->local.disregard_inline_limits)
    node->local.disregard_inline_limits
      = DECL_DISREGARD_INLINE_LIMITS (node->decl);
  estimate_function_body_sizes (node);
  /* Inlining characteristics are maintained by cgraph_mark_inline.  */
  node->global.time = inline_summary (node)->self_time;
  node->global.size = inline_summary (node)->self_size;
  return 0;
}
/* Compute parameters of functions used by inliner using
   current_function_decl.  */
static unsigned int
compute_inline_parameters_for_current (void)
{
  compute_inline_parameters (cgraph_node (current_function_decl));
  return 0;
}

struct gimple_opt_pass pass_inline_parameters =
{
 {
  GIMPLE_PASS,
  "inline_param",			/* name */
  NULL,					/* gate */
  compute_inline_parameters_for_current,/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Perform the intraprocedural analysis of NODE that is required to be able
   to inline indirect calls.  */
static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}
/* Analyze the function body: compute inline parameters and, when indirect
   inlining is enabled, the intraprocedural information it needs.  */
static void
analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  current_function_decl = node->decl;

  compute_inline_parameters (node);
  if (flag_indirect_inlining)
    inline_indirect_intraprocedural_analysis (node);

  current_function_decl = NULL;
  pop_cfun ();
}

/* Called when a new function is inserted into the callgraph late.  */
static void
add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
{
  analyze_function (node);
}
/* Generate the inline summary: analyze every function in the callgraph and
   arrange for functions added later to be analyzed as well.  */
static void
inline_generate_summary (void)
{
  struct cgraph_node *node;

  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);

  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      ipa_check_create_node_params ();
      ipa_check_create_edge_args ();
    }

  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      analyze_function (node);

  return;
}
/* Apply inline plan to function.  */
static unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e;
  bool inline_p = false;

  /* FIXME: Currently the pass manager is adding the inline transform more
     than once to some clones.  This needs revisiting after WPA cleanups.  */
  if (cfun->after_inlining)
    return 0;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (cgraph_preserve_function_body_p (node->decl))
    save_inline_function_body (node);

  for (e = node->callees; e; e = e->next_callee)
    {
      cgraph_redirect_edge_call_stmt_to_callee (e);
      if (!e->inline_failed || warn_inline)
	inline_p = true;
    }

  if (inline_p)
    {
      timevar_push (TV_INTEGRATION);
      todo = optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  return todo | execute_fixup_cfg ();
}
/* Read the inline summary.  Jump functions are shared among ipa-cp and the
   inliner, so when ipa-cp is active we do not read them a second time
   here.  */

static void
inline_read_summary (void)
{
  if (flag_indirect_inlining)
    {
      ipa_register_cgraph_hooks ();
      if (!flag_ipa_cp)
	ipa_prop_read_jump_functions ();
    }
  function_insertion_hook_holder =
      cgraph_add_function_insertion_hook (&add_new_function, NULL);
}
/* Write inline summaries for the nodes in SET.
   Jump functions are shared among ipa-cp and the inliner, so when ipa-cp is
   active, we don't need to write them twice.  */

static void
inline_write_summary (cgraph_node_set set,
		      varpool_node_set vset ATTRIBUTE_UNUSED)
{
  if (flag_indirect_inlining && !flag_ipa_cp)
    ipa_prop_write_jump_functions (set);
}
/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.  */

static bool
gate_cgraph_decide_inlining (void)
{
  /* ???  We'd like to skip this if not optimizing or not inlining as
     all always-inline functions have been processed by early
     inlining already.  But this at least breaks EH with C++ as
     we need to unconditionally run fixup_cfg even at -O0.
     So leave it on unconditionally for now.  */
  return true;
}
struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  gate_cgraph_decide_inlining,		/* gate */
  cgraph_decide_inlining,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_INLINE_HEURISTICS,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_cgraph | TODO_dump_func
  | TODO_remove_functions | TODO_ggc_collect	/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL,					/* variable_transform */
};

#include "gt-ipa-inline.h"