gcc/ipa-inline.c
1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24 We separate inlining decisions from the inliner itself and store them
25 inside the callgraph as a so-called inline plan. Refer to the cgraph.c
26 documentation for the particular representation of inline plans in the
27 callgraph.
29 There are three major parts of this file:
31 cgraph_mark_inline_edge implementation
33 This function marks a given call as inlined and performs the necessary
34 modifications of the cgraph (producing clones and updating overall
35 statistics).
37 inlining heuristics limits
39 These functions check that a particular inlining is allowed by the
40 limits specified by the user (allowed function growth, overall unit
41 growth and so on).
43 inlining heuristics
45 This is the implementation of the IPA pass aiming to get as much benefit
46 from inlining as possible while obeying the limits checked above.
48 The implementation of the particular heuristics is separated from
49 the rest of the code to make it easier to replace with a more complicated
50 implementation in the future. The rest of the inlining code acts as a
51 library aimed at modifying the callgraph and verifying that the limits
52 on code size growth are met.
54 To mark a given call as inlined, use the cgraph_mark_inline_edge function;
55 the verification is performed by cgraph_default_inline_p and
56 cgraph_check_inline_limits.
58 The heuristics implement a simple knapsack-style algorithm, ordering
59 all functions by their "profitability" (estimated from code size growth)
60 and inlining them in priority order.
62 cgraph_decide_inlining implements the heuristics taking the whole callgraph
63 into account, while cgraph_decide_inlining_incrementally considers
64 only one function at a time and is used by the early inliner.
66 The inliner itself is split into several passes:
68 pass_inline_parameters
70 This pass computes the local properties of functions used by the inliner:
71 the estimated function body size, whether the function is inlinable at all,
72 and the stack frame consumption.
74 Before executing any of the inliner passes, this local pass has to be applied
75 to each function in the callgraph (i.e. run as a subpass of some earlier
76 IPA pass). The results are made out of date by any optimization applied
77 to the function body.
79 pass_early_inlining
81 A simple local inlining pass that inlines callees into the current function.
82 This pass performs no global whole-compilation-unit analysis, and thus, when
83 allowed to do inlining that expands code size, it might result in unbounded
84 growth of the whole unit.
86 The pass is run during conversion into SSA form. Only functions already
87 converted into SSA form are inlined, so the conversion must happen in
88 topological order on the callgraph (which is maintained by the pass manager).
89 Functions are early optimized after inlining, so the early inliner sees the
90 unoptimized function itself, but all considered callees are already
91 optimized, allowing it to unfold the C++ abstraction penalty effectively and
92 cheaply.
94 pass_ipa_inline
96 This is the main pass, implementing a simple greedy algorithm that inlines
97 small functions (which results in overall growth of the compilation unit) and
98 functions called once. The pass computes just the so-called inline
99 plan (the representation of inlining to be done in the callgraph) and, unlike
100 early inlining, does not perform the inlining itself.
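/* As a rough, non-normative sketch (added for orientation; the authoritative
   logic lives in the functions below), the IPA inlining decision flow is
   approximately:

     compute_inline_parameters runs for every function (pass_inline_parameters);
     functions carrying the "flatten" attribute are flattened first;
     every call edge of an inline candidate is put into a fibonacci heap
       keyed by cgraph_edge_badness;
     while overall_size stays within compute_max_insns () and the heap is
       non-empty, the edge with the smallest badness is popped, its badness
       and limits are re-checked, it is inlined via cgraph_mark_inline_edge,
       and the keys of affected edges are updated;
     finally, functions called exactly once are inlined when doing so removes
       the offline copy (flag_inline_functions_called_once).  */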
103 #include "config.h"
104 #include "system.h"
105 #include "coretypes.h"
106 #include "tm.h"
107 #include "tree.h"
108 #include "tree-inline.h"
109 #include "langhooks.h"
110 #include "flags.h"
111 #include "cgraph.h"
112 #include "diagnostic.h"
113 #include "gimple-pretty-print.h"
114 #include "timevar.h"
115 #include "params.h"
116 #include "fibheap.h"
117 #include "intl.h"
118 #include "tree-pass.h"
119 #include "hashtab.h"
120 #include "coverage.h"
121 #include "ggc.h"
122 #include "tree-flow.h"
123 #include "rtl.h"
124 #include "ipa-prop.h"
125 #include "except.h"
127 #define MAX_TIME 1000000000
130 /* Statistics we collect about inlining algorithm. */
131 static int ncalls_inlined;
132 static int nfunctions_inlined;
133 static int overall_size;
134 static gcov_type max_count, max_benefit;
136 /* Holders of ipa cgraph hooks: */
137 static struct cgraph_node_hook_list *function_insertion_hook_holder;
139 static inline struct inline_summary *
140 inline_summary (struct cgraph_node *node)
142 return &node->local.inline_summary;
145 /* Estimate the time cost for the caller when inlining EDGE. */
147 static inline int
148 cgraph_estimate_edge_time (struct cgraph_edge *edge)
150 int call_stmt_time;
151 /* ??? We throw away cgraph edges all the time so the information
152 we store in edges doesn't persist for early inlining. Ugh. */
153 if (!edge->call_stmt)
154 call_stmt_time = edge->call_stmt_time;
155 else
156 call_stmt_time = estimate_num_insns (edge->call_stmt, &eni_time_weights);
157 return (((gcov_type)edge->callee->global.time
158 - inline_summary (edge->callee)->time_inlining_benefit
159 - call_stmt_time) * edge->frequency
160 + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
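/* Worked example (illustrative numbers only): with CGRAPH_FREQ_BASE of 1000,
   a callee with global.time of 400, a time_inlining_benefit of 50 and a call
   statement costing 10 time units, called with frequency 500 (half of
   CGRAPH_FREQ_BASE), yields ((400 - 50 - 10) * 500 + 500) / 1000 = 170 time
   units added to the caller.  */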
163 /* Estimate self time of the function NODE after inlining EDGE. */
165 static int
166 cgraph_estimate_time_after_inlining (struct cgraph_node *node,
167 struct cgraph_edge *edge)
169 gcov_type time = node->global.time + cgraph_estimate_edge_time (edge);
170 if (time < 0)
171 time = 0;
172 if (time > MAX_TIME)
173 time = MAX_TIME;
174 return time;
177 /* Estimate the growth of the caller when inlining EDGE. */
179 static inline int
180 cgraph_estimate_edge_growth (struct cgraph_edge *edge)
182 int call_stmt_size;
183 /* ??? We throw away cgraph edges all the time so the information
184 we store in edges doesn't persist for early inlining. Ugh. */
185 if (!edge->call_stmt)
186 call_stmt_size = edge->call_stmt_size;
187 else
188 call_stmt_size = estimate_num_insns (edge->call_stmt, &eni_size_weights);
189 return (edge->callee->global.size
190 - inline_summary (edge->callee)->size_inlining_benefit
191 - call_stmt_size);
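/* A minimal standalone sketch of the growth arithmetic above, with made-up
   numbers; illustrative only and kept disabled, it is not part of the pass.  */
#if 0
static int
example_edge_growth (void)
{
  int callee_size = 30;      /* hypothetical edge->callee->global.size */
  int size_benefit = 6;      /* hypothetical size_inlining_benefit of the callee */
  int call_stmt_size = 4;    /* hypothetical size of the replaced call statement */
  /* Inlining replaces the call statement by the callee body, minus what is
     expected to be optimized away: 30 - 6 - 4 = 20 units of growth.  */
  return callee_size - size_benefit - call_stmt_size;
}
#endif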
194 /* Estimate the size of NODE after inlining EDGE which should be an
195 edge to either NODE or a call inlined into NODE. */
197 static inline int
198 cgraph_estimate_size_after_inlining (struct cgraph_node *node,
199 struct cgraph_edge *edge)
201 int size = node->global.size + cgraph_estimate_edge_growth (edge);
202 gcc_assert (size >= 0);
203 return size;
206 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
207 by NEST. */
209 static void
210 update_noncloned_frequencies (struct cgraph_node *node,
211 int freq_scale, int nest)
213 struct cgraph_edge *e;
215 /* We do not want to ignore high loop nest after freq drops to 0. */
216 if (!freq_scale)
217 freq_scale = 1;
218 for (e = node->callees; e; e = e->next_callee)
220 e->loop_nest += nest;
221 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
222 if (e->frequency > CGRAPH_FREQ_MAX)
223 e->frequency = CGRAPH_FREQ_MAX;
224 if (!e->inline_failed)
225 update_noncloned_frequencies (e->callee, freq_scale, nest);
229 /* E is expected to be an edge being inlined. Clone the destination node of
230 the edge and redirect it to the new clone.
231 DUPLICATE is used for bookkeeping on whether we are actually creating new
232 clones or re-using the node originally representing the out-of-line function call.
234 void
235 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
236 bool update_original)
238 HOST_WIDE_INT peak;
240 if (duplicate)
242 /* We may eliminate the need for out-of-line copy to be output.
243 In that case just go ahead and re-use it. */
244 if (!e->callee->callers->next_caller
245 /* Recursive inlining never wants the master clone to be overwritten. */
246 && update_original
247 /* FIXME: When the address of a DECL_EXTERNAL function is taken, we can still remove
248 its offline copy, but we would need to keep an unanalyzed node in the callgraph so
249 references can point to it. */
250 && !e->callee->address_taken
251 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
252 /* Inlining might enable more devirtualization, so we want to remove
253 such functions only after all devirtualizable virtual calls are processed.
254 Lacking "may" edges in the callgraph, we just preserve them post
255 inlining. */
256 && (!DECL_VIRTUAL_P (e->callee->decl)
257 || (!DECL_COMDAT (e->callee->decl) && !DECL_EXTERNAL (e->callee->decl)))
258 /* Don't reuse if more than one function shares a comdat group.
259 If the other function(s) are needed, we need to emit even
260 this function out of line. */
261 && !e->callee->same_comdat_group
262 && !cgraph_new_nodes)
264 gcc_assert (!e->callee->global.inlined_to);
265 if (e->callee->analyzed && !DECL_EXTERNAL (e->callee->decl))
267 overall_size -= e->callee->global.size;
268 nfunctions_inlined++;
270 duplicate = false;
271 e->callee->local.externally_visible = false;
272 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
274 else
276 struct cgraph_node *n;
277 n = cgraph_clone_node (e->callee, e->callee->decl,
278 e->count, e->frequency, e->loop_nest,
279 update_original, NULL);
280 cgraph_redirect_edge_callee (e, n);
284 if (e->caller->global.inlined_to)
285 e->callee->global.inlined_to = e->caller->global.inlined_to;
286 else
287 e->callee->global.inlined_to = e->caller;
288 e->callee->global.stack_frame_offset
289 = e->caller->global.stack_frame_offset
290 + inline_summary (e->caller)->estimated_self_stack_size;
291 peak = e->callee->global.stack_frame_offset
292 + inline_summary (e->callee)->estimated_self_stack_size;
293 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
294 e->callee->global.inlined_to->global.estimated_stack_size = peak;
295 cgraph_propagate_frequency (e->callee);
297 /* Recursively clone all bodies. */
298 for (e = e->callee->callees; e; e = e->next_callee)
299 if (!e->inline_failed)
300 cgraph_clone_inlined_nodes (e, duplicate, update_original);
303 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
304 specifies whether the profile of the original function should be updated. If any new
305 indirect edges are discovered in the process, add them to NEW_EDGES, unless
306 it is NULL. Return true iff any new callgraph edges were discovered as a
307 result of inlining. */
309 static bool
310 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
311 VEC (cgraph_edge_p, heap) **new_edges)
313 int old_size = 0, new_size = 0;
314 struct cgraph_node *to = NULL;
315 struct cgraph_edge *curr = e;
317 /* Don't inline inlined edges. */
318 gcc_assert (e->inline_failed);
319 /* Don't even think of inlining an inline clone. */
320 gcc_assert (!e->callee->global.inlined_to);
322 e->inline_failed = CIF_OK;
323 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
325 cgraph_clone_inlined_nodes (e, true, update_original);
327 /* Now update the size of the caller and of all functions the caller is inlined into. */
328 for (;e && !e->inline_failed; e = e->caller->callers)
330 to = e->caller;
331 old_size = e->caller->global.size;
332 new_size = cgraph_estimate_size_after_inlining (to, curr);
333 to->global.size = new_size;
334 to->global.time = cgraph_estimate_time_after_inlining (to, curr);
336 gcc_assert (curr->callee->global.inlined_to == to);
337 if (new_size > old_size)
338 overall_size += new_size - old_size;
339 ncalls_inlined++;
341 /* FIXME: We should remove the optimize check after we ensure we never run
342 IPA passes when not optimizing. */
343 if (flag_indirect_inlining && optimize)
344 return ipa_propagate_indirect_call_infos (curr, new_edges);
345 else
346 return false;
349 /* Estimate the growth caused by inlining NODE into all callees. */
351 static int
352 cgraph_estimate_growth (struct cgraph_node *node)
354 int growth = 0;
355 struct cgraph_edge *e;
356 bool self_recursive = false;
358 if (node->global.estimated_growth != INT_MIN)
359 return node->global.estimated_growth;
361 for (e = node->callers; e; e = e->next_caller)
363 if (e->caller == node)
364 self_recursive = true;
365 if (e->inline_failed)
366 growth += cgraph_estimate_edge_growth (e);
369 /* ??? Wrong for non-trivially self-recursive functions or cases where
370 we decide not to inline for different reasons, but it is not a big deal,
371 as in that case we will keep the body around, but we will also avoid
372 some inlining. */
373 if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
374 && !DECL_EXTERNAL (node->decl) && !self_recursive)
375 growth -= node->global.size;
376 /* COMDAT functions are very often not shared across multiple units since they
377 come from various template instantiations. Take this into account. */
378 else if (DECL_COMDAT (node->decl) && !self_recursive
379 && cgraph_can_remove_if_no_direct_calls_p (node))
380 growth -= (node->global.size
381 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY)) + 50) / 100;
383 node->global.estimated_growth = growth;
384 return growth;
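/* Worked example (illustrative numbers only): a COMDAT function of size 100
   with two uninlined callers, each estimated to grow by 40 when inlined,
   accumulates growth of 80. If it can be removed once no direct calls remain
   and the comdat-sharing-probability parameter is 20, the expected saving of
   the offline copy is (100 * (100 - 20) + 50) / 100 = 80, so the overall
   estimated growth becomes 0.  */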
387 /* Return false when inlining edge E is not a good idea
388 because it would cause too large a growth of the caller's function body
389 or stack frame size. *REASON, if non-NULL, is updated if the
390 inlining is not a good idea. */
392 static bool
393 cgraph_check_inline_limits (struct cgraph_edge *e,
394 cgraph_inline_failed_t *reason)
396 struct cgraph_node *to = e->caller;
397 struct cgraph_node *what = e->callee;
398 int newsize;
399 int limit;
400 HOST_WIDE_INT stack_size_limit, inlined_stack;
402 if (to->global.inlined_to)
403 to = to->global.inlined_to;
405 /* When inlining a large function body called once into a small function,
406 take the inlined function as the base for limiting the growth. */
407 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
408 limit = inline_summary (to)->self_size;
409 else
410 limit = inline_summary (what)->self_size;
412 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
414 /* Check the size after inlining against the function limits. But allow
415 the function to shrink if it went over the limits by forced inlining. */
416 newsize = cgraph_estimate_size_after_inlining (to, e);
417 if (newsize >= to->global.size
418 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
419 && newsize > limit)
421 if (reason)
422 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
423 return false;
426 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
428 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
430 inlined_stack = (to->global.stack_frame_offset
431 + inline_summary (to)->estimated_self_stack_size
432 + what->global.estimated_stack_size);
433 if (inlined_stack > stack_size_limit
434 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
436 if (reason)
437 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
438 return false;
440 return true;
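/* Worked example (illustrative parameter values only, not a claim about the
   current defaults): with a large-function-growth of 100%, a
   large-function-insns of 2700, a large-stack-frame of 256 and a
   stack-frame-growth of 1000%, inlining a callee of self_size 500 into a
   caller of self_size 300 uses limit = 500 + 500 * 100 / 100 = 1000, so the
   inlining is rejected only if the new size is at least the old size and
   exceeds both 2700 and the limit of 1000. The stack check likewise rejects
   only frames that exceed both the scaled caller frame limit and the
   256-byte large-frame threshold.  */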
443 /* Return true when function N is small enough to be inlined. */
445 static bool
446 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
448 tree decl = n->decl;
450 if (n->local.disregard_inline_limits)
451 return true;
453 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
455 if (reason)
456 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
457 return false;
459 if (!n->analyzed)
461 if (reason)
462 *reason = CIF_BODY_NOT_AVAILABLE;
463 return false;
465 if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
467 if (reason)
468 *reason = CIF_OVERWRITABLE;
469 return false;
473 if (DECL_DECLARED_INLINE_P (decl))
475 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
477 if (reason)
478 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
479 return false;
482 else
484 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
486 if (reason)
487 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
488 return false;
492 return true;
495 /* A cost model driving the inlining heuristics so that the edges with the
496 smallest badness are inlined first. After each inlining is performed,
497 the costs of all caller edges of the affected nodes are recomputed so the
498 metrics may accurately depend on values such as the number of inlinable callers
499 of the function or the function body size. */
501 static int
502 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
504 gcov_type badness;
505 int growth;
507 if (edge->callee->local.disregard_inline_limits)
508 return INT_MIN;
510 growth = cgraph_estimate_edge_growth (edge);
512 if (dump)
514 fprintf (dump_file, " Badness calculation for %s -> %s\n",
515 cgraph_node_name (edge->caller),
516 cgraph_node_name (edge->callee));
517 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
518 growth,
519 edge->callee->global.time,
520 inline_summary (edge->callee)->time_inlining_benefit
521 + edge->call_stmt_time,
522 edge->callee->global.size,
523 inline_summary (edge->callee)->size_inlining_benefit
524 + edge->call_stmt_size);
527 /* Always prefer inlining that saves code size. */
528 if (growth <= 0)
530 badness = INT_MIN - growth;
531 if (dump)
532 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
533 growth);
536 /* When profiling is available, base priorities on -(#calls / growth),
537 so we optimize for the overall number of "executed" inlined calls. */
538 else if (max_count)
540 badness =
541 ((int)
542 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
543 (inline_summary (edge->callee)->time_inlining_benefit
544 + edge->call_stmt_time + 1)) / growth;
545 if (dump)
547 fprintf (dump_file,
548 " %i (relative %f): profile info. Relative count %f"
549 " * Relative benefit %f\n",
550 (int) badness, (double) badness / INT_MIN,
551 (double) edge->count / max_count,
552 (double) (inline_summary (edge->callee)->
553 time_inlining_benefit
554 + edge->call_stmt_time + 1) / (max_benefit + 1));
558 /* When a function-local profile is available, base priorities on
559 growth / frequency, so we optimize for the overall frequency of inlined
560 calls. This is not too accurate since, while the call might be frequent
561 within the function, the function itself may be infrequent.
563 Another objective to optimize for is the number of different calls inlined.
564 We add the estimated growth after inlining all functions to bias the
565 priorities slightly in this direction (so functions of the same size that
566 are called fewer times get priority). */
567 else if (flag_guess_branch_prob)
569 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
570 int benefitperc;
571 int growth_for_all;
572 badness = growth * 10000;
573 benefitperc =
574 100 * (inline_summary (edge->callee)->time_inlining_benefit
575 + edge->call_stmt_time)
576 / (edge->callee->global.time + 1) + 1;
577 benefitperc = MIN (benefitperc, 100);
578 div *= benefitperc;
580 /* Decrease badness if call is nested. */
581 /* Compress the range so we don't overflow. */
582 if (div > 10000)
583 div = 10000 + ceil_log2 (div) - 8;
584 if (div < 1)
585 div = 1;
586 if (badness > 0)
587 badness /= div;
588 growth_for_all = cgraph_estimate_growth (edge->callee);
589 badness += growth_for_all;
590 if (badness > INT_MAX)
591 badness = INT_MAX;
592 if (dump)
594 fprintf (dump_file,
595 " %i: guessed profile. frequency %i, overall growth %i,"
596 " benefit %i%%, divisor %i\n",
597 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
600 /* When a function-local profile is not available or does not give
601 useful information (i.e. the frequency is zero), base the cost on
602 loop nest and overall size growth, so we optimize for the overall number
603 of functions fully inlined in the program. */
604 else
606 int nest = MIN (edge->loop_nest, 8);
607 badness = cgraph_estimate_growth (edge->callee) * 256;
609 /* Decrease badness if call is nested. */
610 if (badness > 0)
611 badness >>= nest;
612 else
614 badness <<= nest;
616 if (dump)
617 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
618 nest);
621 /* Ensure that we did not overflow in all the fixed point math above. */
622 gcc_assert (badness >= INT_MIN);
623 gcc_assert (badness <= INT_MAX - 1);
624 /* Make recursive inlining always happen after other inlining is done. */
625 if (cgraph_edge_recursive_p (edge))
626 return badness + 1;
627 else
628 return badness;
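/* Worked example for the guessed-profile branch above (illustrative numbers
   only): a growth of 20 gives badness = 200000; a call with frequency equal
   to CGRAPH_FREQ_BASE gives div = 101; a callee with global.time 99,
   time_inlining_benefit 30 and call_stmt_time 10 gives
   benefitperc = 100 * 40 / 100 + 1 = 41, so div = 101 * 41 = 4141 and
   badness = 200000 / 4141 = 48; adding an estimated overall growth of 60
   yields a final badness of 108. Higher frequency or higher benefit thus
   lowers badness, making the edge more attractive.  */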
631 /* Recompute badness of EDGE and update its key in HEAP if needed. */
632 static void
633 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
635 int badness = cgraph_edge_badness (edge, false);
636 if (edge->aux)
638 fibnode_t n = (fibnode_t) edge->aux;
639 gcc_checking_assert (n->data == edge);
641 /* fibheap_replace_key only decreases the keys.
642 When we would increase the key, we do not update the heap
643 and instead re-insert the element once it becomes
644 the minimum of the heap. */
645 if (badness < n->key)
647 fibheap_replace_key (heap, n, badness);
648 gcc_checking_assert (n->key == badness);
651 else
652 edge->aux = fibheap_insert (heap, badness, edge);
655 /* Recompute heap nodes for each caller edge. */
657 static void
658 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
659 bitmap updated_nodes)
661 struct cgraph_edge *edge;
662 cgraph_inline_failed_t failed_reason;
664 if (!node->local.inlinable
665 || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
666 || node->global.inlined_to)
667 return;
668 if (!bitmap_set_bit (updated_nodes, node->uid))
669 return;
670 node->global.estimated_growth = INT_MIN;
672 /* See if there is something to do. */
673 for (edge = node->callers; edge; edge = edge->next_caller)
674 if (edge->inline_failed)
675 break;
676 if (!edge)
677 return;
678 /* Prune out edges we won't inline into anymore. */
679 if (!cgraph_default_inline_p (node, &failed_reason))
681 for (; edge; edge = edge->next_caller)
682 if (edge->aux)
684 fibheap_delete_node (heap, (fibnode_t) edge->aux);
685 edge->aux = NULL;
686 if (edge->inline_failed)
687 edge->inline_failed = failed_reason;
689 return;
692 for (; edge; edge = edge->next_caller)
693 if (edge->inline_failed)
694 update_edge_key (heap, edge);
697 /* Recompute heap nodes for each uninlined call.
698 This is used when we know that edge badnesses are only going to increase
699 (we introduced a new call site) and thus all we need to do is insert newly
700 created edges into the heap. */
702 static void
703 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
704 bitmap updated_nodes)
706 struct cgraph_edge *e = node->callees;
707 node->global.estimated_growth = INT_MIN;
709 if (!e)
710 return;
711 while (true)
712 if (!e->inline_failed && e->callee->callees)
713 e = e->callee->callees;
714 else
716 if (e->inline_failed
717 && e->callee->local.inlinable
718 && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
719 && !bitmap_bit_p (updated_nodes, e->callee->uid))
721 node->global.estimated_growth = INT_MIN;
722 /* If function becomes uninlinable, we need to remove it from the heap. */
723 if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
724 update_caller_keys (heap, e->callee, updated_nodes);
725 else
726 /* Otherwise update just edge E. */
727 update_edge_key (heap, e);
729 if (e->next_callee)
730 e = e->next_callee;
731 else
735 if (e->caller == node)
736 return;
737 e = e->caller->callers;
739 while (!e->next_callee);
740 e = e->next_callee;
745 /* Recompute heap nodes for the caller edges of each of the callees.
746 Walk recursively into all inline clones. */
748 static void
749 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
750 bitmap updated_nodes)
752 struct cgraph_edge *e = node->callees;
753 node->global.estimated_growth = INT_MIN;
755 if (!e)
756 return;
757 while (true)
758 if (!e->inline_failed && e->callee->callees)
759 e = e->callee->callees;
760 else
762 if (e->inline_failed)
763 update_caller_keys (heap, e->callee, updated_nodes);
764 if (e->next_callee)
765 e = e->next_callee;
766 else
770 if (e->caller == node)
771 return;
772 e = e->caller->callers;
774 while (!e->next_callee);
775 e = e->next_callee;
780 /* Enqueue all recursive calls from NODE into the priority queue, ordered by
781 how much we want to recursively inline the call. */
783 static void
784 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
785 fibheap_t heap)
787 static int priority;
788 struct cgraph_edge *e;
789 for (e = where->callees; e; e = e->next_callee)
790 if (e->callee == node)
792 /* When profile feedback is available, prioritize by the expected number
793 of calls. Without profile feedback we maintain a simple queue
794 that orders candidates by recursion depth. */
795 fibheap_insert (heap,
796 !max_count ? priority++
797 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
800 for (e = where->callees; e; e = e->next_callee)
801 if (!e->inline_failed)
802 lookup_recursive_calls (node, e->callee, heap);
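/* Worked example for the profile-feedback key above (illustrative numbers
   only): with max_count of 2^30, the divisor (max_count + (1<<24) - 1) / (1<<24)
   is 64, so an edge with count 10,000,000 is inserted with key -156250 and an
   edge with count 3,000,000 with key -46875; the hotter edge, having the
   smaller key, is extracted from the fibheap first.  */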
805 /* Decide on recursive inlining: in case the function has recursive calls,
806 inline until the body size reaches the given limit. If any new indirect edges
807 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
808 is NULL. */
810 static bool
811 cgraph_decide_recursive_inlining (struct cgraph_edge *edge,
812 VEC (cgraph_edge_p, heap) **new_edges)
814 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
815 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
816 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
817 fibheap_t heap;
818 struct cgraph_node *node;
819 struct cgraph_edge *e;
820 struct cgraph_node *master_clone, *next;
821 int depth = 0;
822 int n = 0;
824 node = edge->caller;
825 if (node->global.inlined_to)
826 node = node->global.inlined_to;
828 /* It does not make sense to recursively inline always-inline functions
829 as we are going to sorry() on the remaining calls anyway. */
830 if (node->local.disregard_inline_limits
831 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
832 return false;
834 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
835 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
836 return false;
838 if (DECL_DECLARED_INLINE_P (node->decl))
840 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
841 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
844 /* Make sure that function is small enough to be considered for inlining. */
845 if (!max_depth
846 || cgraph_estimate_size_after_inlining (node, edge) >= limit)
847 return false;
848 heap = fibheap_new ();
849 lookup_recursive_calls (node, node, heap);
850 if (fibheap_empty (heap))
852 fibheap_delete (heap);
853 return false;
856 if (dump_file)
857 fprintf (dump_file,
858 " Performing recursive inlining on %s\n",
859 cgraph_node_name (node));
861 /* We need original clone to copy around. */
862 master_clone = cgraph_clone_node (node, node->decl,
863 node->count, CGRAPH_FREQ_BASE, 1,
864 false, NULL);
865 for (e = master_clone->callees; e; e = e->next_callee)
866 if (!e->inline_failed)
867 cgraph_clone_inlined_nodes (e, true, false);
869 /* Do the inlining and update the list of recursive calls during the process. */
870 while (!fibheap_empty (heap))
872 struct cgraph_edge *curr
873 = (struct cgraph_edge *) fibheap_extract_min (heap);
874 struct cgraph_node *cnode;
876 if (cgraph_estimate_size_after_inlining (node, curr) > limit)
877 break;
879 depth = 1;
880 for (cnode = curr->caller;
881 cnode->global.inlined_to; cnode = cnode->callers->caller)
882 if (node->decl == curr->callee->decl)
883 depth++;
884 if (depth > max_depth)
886 if (dump_file)
887 fprintf (dump_file,
888 " maximal depth reached\n");
889 continue;
892 if (max_count)
894 if (!cgraph_maybe_hot_edge_p (curr))
896 if (dump_file)
897 fprintf (dump_file, " Not inlining cold call\n");
898 continue;
900 if (curr->count * 100 / node->count < probability)
902 if (dump_file)
903 fprintf (dump_file,
904 " Probability of edge is too small\n");
905 continue;
909 if (dump_file)
911 fprintf (dump_file,
912 " Inlining call of depth %i", depth);
913 if (node->count)
915 fprintf (dump_file, " called approx. %.2f times per call",
916 (double)curr->count / node->count);
918 fprintf (dump_file, "\n");
920 cgraph_redirect_edge_callee (curr, master_clone);
921 cgraph_mark_inline_edge (curr, false, new_edges);
922 lookup_recursive_calls (node, curr->callee, heap);
923 n++;
925 if (!fibheap_empty (heap) && dump_file)
926 fprintf (dump_file, " Recursive inlining growth limit met.\n");
928 fibheap_delete (heap);
929 if (dump_file)
930 fprintf (dump_file,
931 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
932 master_clone->global.size, node->global.size,
933 master_clone->global.time, node->global.time);
935 /* Remove the master clone we used for inlining. We rely on the fact that
936 clones inlined into the master clone get queued just before the master clone,
937 so we don't need recursion. */
938 for (node = cgraph_nodes; node != master_clone;
939 node = next)
941 next = node->next;
942 if (node->global.inlined_to == master_clone)
943 cgraph_remove_node (node);
945 cgraph_remove_node (master_clone);
946 /* FIXME: Recursive inlining actually reduces the number of calls to the
947 function. At this point we should probably walk the function and its
948 inline clones and compensate the counts accordingly. This probably
949 doesn't matter much in practice. */
950 return n > 0;
953 /* Set inline_failed for all callers of the given function to REASON. */
955 static void
956 cgraph_set_inline_failed (struct cgraph_node *node,
957 cgraph_inline_failed_t reason)
959 struct cgraph_edge *e;
961 if (dump_file)
962 fprintf (dump_file, "Inlining failed: %s\n",
963 cgraph_inline_failed_string (reason));
964 for (e = node->callers; e; e = e->next_caller)
965 if (e->inline_failed)
966 e->inline_failed = reason;
969 /* Given the whole-compilation-unit size estimate INSNS, compute how large we can
970 allow the unit to grow. */
971 static int
972 compute_max_insns (int insns)
974 int max_insns = insns;
975 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
976 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
978 return ((HOST_WIDEST_INT) max_insns
979 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
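/* Worked example (illustrative parameter values only): with an
   inline-unit-growth of 30 and a large-unit-insns of 10000, a unit estimated
   at 40000 insns may grow to 40000 * 130 / 100 = 52000, while a small unit of
   4000 insns is first rounded up to the 10000 floor and may grow to 13000.  */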
982 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
983 static void
984 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
986 while (VEC_length (cgraph_edge_p, new_edges) > 0)
988 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
990 gcc_assert (!edge->aux);
991 if (edge->callee->local.inlinable
992 && edge->inline_failed
993 && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
994 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
999 /* We use a greedy algorithm for inlining small functions:
1000 all inline candidates are put into a prioritized heap based on the estimated
1001 growth of the overall number of instructions, and the estimates are then updated.
1003 INLINED and INLINED_CALLEES are just pointers to arrays large enough
1004 to be passed to cgraph_inlined_into and cgraph_inlined_callees. */
1006 static void
1007 cgraph_decide_inlining_of_small_functions (void)
1009 struct cgraph_node *node;
1010 struct cgraph_edge *edge;
1011 cgraph_inline_failed_t failed_reason;
1012 fibheap_t heap = fibheap_new ();
1013 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1014 int min_size, max_size;
1015 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1017 if (flag_indirect_inlining)
1018 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1020 if (dump_file)
1021 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1023 /* Put all inline candidates into the heap. */
1025 for (node = cgraph_nodes; node; node = node->next)
1027 if (!node->local.inlinable || !node->callers)
1028 continue;
1029 if (dump_file)
1030 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1032 node->global.estimated_growth = INT_MIN;
1033 if (!cgraph_default_inline_p (node, &failed_reason))
1035 cgraph_set_inline_failed (node, failed_reason);
1036 continue;
1039 for (edge = node->callers; edge; edge = edge->next_caller)
1040 if (edge->inline_failed)
1042 gcc_assert (!edge->aux);
1043 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1047 max_size = compute_max_insns (overall_size);
1048 min_size = overall_size;
1050 while (overall_size <= max_size
1051 && !fibheap_empty (heap))
1053 int old_size = overall_size;
1054 struct cgraph_node *where, *callee;
1055 int badness = fibheap_min_key (heap);
1056 int current_badness;
1057 int growth;
1058 cgraph_inline_failed_t not_good = CIF_OK;
1060 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1061 gcc_assert (edge->aux);
1062 edge->aux = NULL;
1063 if (!edge->inline_failed)
1064 continue;
1066 /* When updating the edge costs, we only decrease badness in the keys.
1067 When the badness increases, we keep the heap as it is and re-insert
1068 the key now. */
1069 current_badness = cgraph_edge_badness (edge, false);
1070 gcc_assert (current_badness >= badness);
1071 if (current_badness != badness)
1073 edge->aux = fibheap_insert (heap, current_badness, edge);
1074 continue;
1077 callee = edge->callee;
1078 growth = cgraph_estimate_edge_growth (edge);
1079 if (dump_file)
1081 fprintf (dump_file,
1082 "\nConsidering %s with %i size\n",
1083 cgraph_node_name (edge->callee),
1084 edge->callee->global.size);
1085 fprintf (dump_file,
1086 " to be inlined into %s in %s:%i\n"
1087 " Estimated growth after inlined into all callees is %+i insns.\n"
1088 " Estimated badness is %i, frequency %.2f.\n",
1089 cgraph_node_name (edge->caller),
1090 flag_wpa ? "unknown"
1091 : gimple_filename ((const_gimple) edge->call_stmt),
1092 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1093 cgraph_estimate_growth (edge->callee),
1094 badness,
1095 edge->frequency / (double)CGRAPH_FREQ_BASE);
1096 if (edge->count)
1097 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1098 if (dump_flags & TDF_DETAILS)
1099 cgraph_edge_badness (edge, true);
1102 /* When we do not have profile info ready, we do not weight in any way the
1103 position of the call within the procedure itself. This means that if a call of
1104 function A from function B seems profitable to inline, the recursive
1105 call of function A in the inline copy of A in B will look profitable too,
1106 and we end up inlining until we reach the maximal function growth. This
1107 is not a good idea, so prohibit the recursive inlining.
1109 ??? When the frequencies are taken into account we might not need this
1110 restriction.
1112 We need to be careful here: in some testcases, e.g. directives.c in
1113 libcpp, we can estimate a self-recursive function to have negative growth
1114 when inlined completely.
1116 if (!edge->count)
1118 where = edge->caller;
1119 while (where->global.inlined_to)
1121 if (where->decl == edge->callee->decl)
1122 break;
1123 where = where->callers->caller;
1125 if (where->global.inlined_to)
1127 edge->inline_failed
1128 = (edge->callee->local.disregard_inline_limits
1129 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1130 if (dump_file)
1131 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1132 continue;
1136 if (edge->callee->local.disregard_inline_limits)
1138 else if (!cgraph_maybe_hot_edge_p (edge))
1139 not_good = CIF_UNLIKELY_CALL;
1140 else if (!flag_inline_functions
1141 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1142 not_good = CIF_NOT_DECLARED_INLINED;
1143 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1144 not_good = CIF_OPTIMIZING_FOR_SIZE;
1145 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1147 edge->inline_failed = not_good;
1148 if (dump_file)
1149 fprintf (dump_file, " inline_failed:%s.\n",
1150 cgraph_inline_failed_string (edge->inline_failed));
1151 continue;
1153 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1155 if (dump_file)
1156 fprintf (dump_file, " inline_failed:%s.\n",
1157 cgraph_inline_failed_string (edge->inline_failed));
1158 continue;
1160 if (!tree_can_inline_p (edge)
1161 || edge->call_stmt_cannot_inline_p)
1163 if (dump_file)
1164 fprintf (dump_file, " inline_failed:%s.\n",
1165 cgraph_inline_failed_string (edge->inline_failed));
1166 continue;
1168 if (cgraph_edge_recursive_p (edge))
1170 where = edge->caller;
1171 if (where->global.inlined_to)
1172 where = where->global.inlined_to;
1173 if (!cgraph_decide_recursive_inlining (edge,
1174 flag_indirect_inlining
1175 ? &new_indirect_edges : NULL))
1177 edge->inline_failed = CIF_RECURSIVE_INLINING;
1178 continue;
1180 if (flag_indirect_inlining)
1181 add_new_edges_to_heap (heap, new_indirect_edges);
1182 update_all_callee_keys (heap, where, updated_nodes);
1184 else
1186 struct cgraph_node *callee;
1187 if (!cgraph_check_inline_limits (edge, &edge->inline_failed))
1189 if (dump_file)
1190 fprintf (dump_file, " Not inlining into %s:%s.\n",
1191 cgraph_node_name (edge->caller),
1192 cgraph_inline_failed_string (edge->inline_failed));
1193 continue;
1195 callee = edge->callee;
1196 gcc_checking_assert (!callee->global.inlined_to);
1197 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1198 if (flag_indirect_inlining)
1199 add_new_edges_to_heap (heap, new_indirect_edges);
1201 /* We inlined the last offline copy into the body. This might lead
1202 to callees of the function having fewer call sites, and thus they
1203 may need updating. */
1204 if (callee->global.inlined_to)
1205 update_all_callee_keys (heap, callee, updated_nodes);
1206 else
1207 update_callee_keys (heap, edge->callee, updated_nodes);
1209 where = edge->caller;
1210 if (where->global.inlined_to)
1211 where = where->global.inlined_to;
1213 /* Our profitability metric can depend on local properties
1214 such as the number of inlinable calls and the size of the function body.
1215 After inlining, these properties might change for the function we
1216 inlined into (since its body size changed) and for the functions
1217 called by the function we inlined (since the number of their inlinable callers
1218 might change). */
1219 update_caller_keys (heap, where, updated_nodes);
1221 /* We removed one call to the function we just inlined. If the offline
1222 copy is still needed, be sure to update the keys. */
1223 if (callee != where && !callee->global.inlined_to)
1224 update_caller_keys (heap, callee, updated_nodes);
1225 bitmap_clear (updated_nodes);
1227 if (dump_file)
1229 fprintf (dump_file,
1230 " Inlined into %s which now has time %i and size %i,"
1231 "net change of %+i.\n",
1232 cgraph_node_name (edge->caller),
1233 edge->caller->global.time,
1234 edge->caller->global.size,
1235 overall_size - old_size);
1237 if (min_size > overall_size)
1239 min_size = overall_size;
1240 max_size = compute_max_insns (min_size);
1242 if (dump_file)
1243 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1246 while (!fibheap_empty (heap))
1248 int badness = fibheap_min_key (heap);
1250 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1251 gcc_assert (edge->aux);
1252 edge->aux = NULL;
1253 if (!edge->inline_failed)
1254 continue;
1255 #ifdef ENABLE_CHECKING
1256 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1257 #endif
1258 if (dump_file)
1260 fprintf (dump_file,
1261 "\nSkipping %s with %i size\n",
1262 cgraph_node_name (edge->callee),
1263 edge->callee->global.size);
1264 fprintf (dump_file,
1265 " called by %s in %s:%i\n"
1266 " Estimated growth after inlined into all callees is %+i insns.\n"
1267 " Estimated badness is %i, frequency %.2f.\n",
1268 cgraph_node_name (edge->caller),
1269 flag_wpa ? "unknown"
1270 : gimple_filename ((const_gimple) edge->call_stmt),
1271 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1272 cgraph_estimate_growth (edge->callee),
1273 badness,
1274 edge->frequency / (double)CGRAPH_FREQ_BASE);
1275 if (edge->count)
1276 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1277 if (dump_flags & TDF_DETAILS)
1278 cgraph_edge_badness (edge, true);
1280 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed)
1281 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1284 if (new_indirect_edges)
1285 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1286 fibheap_delete (heap);
1287 BITMAP_FREE (updated_nodes);
1290 /* Flatten NODE from the IPA inliner. */
1292 static void
1293 cgraph_flatten (struct cgraph_node *node)
1295 struct cgraph_edge *e;
1297 /* We shouldn't be called recursively when we are being processed. */
1298 gcc_assert (node->aux == NULL);
1300 node->aux = (void *) node;
1302 for (e = node->callees; e; e = e->next_callee)
1304 struct cgraph_node *orig_callee;
1306 if (e->call_stmt_cannot_inline_p)
1308 if (dump_file)
1309 fprintf (dump_file, "Not inlining: %s",
1310 cgraph_inline_failed_string (e->inline_failed));
1311 continue;
1314 if (!e->callee->analyzed)
1316 if (dump_file)
1317 fprintf (dump_file,
1318 "Not inlining: Function body not available.\n");
1319 continue;
1322 /* We've hit a cycle? It is time to give up. */
1323 if (e->callee->aux)
1325 if (dump_file)
1326 fprintf (dump_file,
1327 "Not inlining %s into %s to avoid cycle.\n",
1328 cgraph_node_name (e->callee),
1329 cgraph_node_name (e->caller));
1330 e->inline_failed = CIF_RECURSIVE_INLINING;
1331 continue;
1334 /* When the edge is already inlined, we just need to recurse into
1335 it in order to fully flatten the leaves. */
1336 if (!e->inline_failed)
1338 cgraph_flatten (e->callee);
1339 continue;
1342 if (cgraph_edge_recursive_p (e))
1344 if (dump_file)
1345 fprintf (dump_file, "Not inlining: recursive call.\n");
1346 continue;
1349 if (!tree_can_inline_p (e))
1351 if (dump_file)
1352 fprintf (dump_file, "Not inlining: %s",
1353 cgraph_inline_failed_string (e->inline_failed));
1354 continue;
1357 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1358 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1360 if (dump_file)
1361 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1362 continue;
1365 /* Inline the edge and flatten the inline clone. Avoid
1366 recursing through the original node if the node was cloned. */
1367 if (dump_file)
1368 fprintf (dump_file, " Inlining %s into %s.\n",
1369 cgraph_node_name (e->callee),
1370 cgraph_node_name (e->caller));
1371 orig_callee = e->callee;
1372 cgraph_mark_inline_edge (e, true, NULL);
1373 if (e->callee != orig_callee)
1374 orig_callee->aux = (void *) node;
1375 cgraph_flatten (e->callee);
1376 if (e->callee != orig_callee)
1377 orig_callee->aux = NULL;
1380 node->aux = NULL;
1383 /* Decide on the inlining. We do so in topological order to avoid
1384 the expense of updating data structures. */
1386 static unsigned int
1387 cgraph_decide_inlining (void)
1389 struct cgraph_node *node;
1390 int nnodes;
1391 struct cgraph_node **order =
1392 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1393 int old_size = 0;
1394 int i;
1395 int initial_size = 0;
1397 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1398 if (in_lto_p && flag_indirect_inlining)
1399 ipa_update_after_lto_read ();
1400 if (flag_indirect_inlining)
1401 ipa_create_all_structures_for_iinln ();
1403 max_count = 0;
1404 max_benefit = 0;
1405 for (node = cgraph_nodes; node; node = node->next)
1406 if (node->analyzed)
1408 struct cgraph_edge *e;
1410 gcc_assert (inline_summary (node)->self_size == node->global.size);
1411 if (!DECL_EXTERNAL (node->decl))
1412 initial_size += node->global.size;
1413 for (e = node->callees; e; e = e->next_callee)
1415 int benefit = (inline_summary (node)->time_inlining_benefit
1416 + e->call_stmt_time);
1417 if (max_count < e->count)
1418 max_count = e->count;
1419 if (max_benefit < benefit)
1420 max_benefit = benefit;
1423 gcc_assert (in_lto_p
1424 || !max_count
1425 || (profile_info && flag_branch_probabilities));
1426 overall_size = initial_size;
1428 nnodes = cgraph_postorder (order);
1430 if (dump_file)
1431 fprintf (dump_file,
1432 "\nDeciding on inlining. Starting with size %i.\n",
1433 initial_size);
1435 for (node = cgraph_nodes; node; node = node->next)
1436 node->aux = 0;
1438 if (dump_file)
1439 fprintf (dump_file, "\nFlattening functions:\n");
1441 /* In the first pass handle functions to be flattened. Do this with
1442 a priority so none of our later choices will make this impossible. */
1443 for (i = nnodes - 1; i >= 0; i--)
1445 node = order[i];
1447 /* Handle nodes to be flattened, but don't update overall unit
1448 size. Calling the incremental inliner here is lame;
1449 a simple worklist should be enough. What should be left
1450 here from the early inliner (if it runs) is the cyclic cases.
1451 Ideally, when processing callees we would stop inlining at the
1452 entry of cycles, possibly cloning that entry point and
1453 trying to flatten it, turning it into a self-recursive
1454 function. */
1455 if (lookup_attribute ("flatten",
1456 DECL_ATTRIBUTES (node->decl)) != NULL)
1458 if (dump_file)
1459 fprintf (dump_file,
1460 "Flattening %s\n", cgraph_node_name (node));
1461 cgraph_flatten (node);
1465 cgraph_decide_inlining_of_small_functions ();
1467 if (flag_inline_functions_called_once)
1469 if (dump_file)
1470 fprintf (dump_file, "\nDeciding on functions called once:\n");
1472 /* And finally decide what functions are called once. */
1473 for (i = nnodes - 1; i >= 0; i--)
1475 node = order[i];
1477 if (node->callers
1478 && !node->callers->next_caller
1479 && !node->global.inlined_to
1480 && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
1481 && node->local.inlinable
1482 && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
1483 && node->callers->inline_failed
1484 && node->callers->caller != node
1485 && node->callers->caller->global.inlined_to != node
1486 && !node->callers->call_stmt_cannot_inline_p
1487 && tree_can_inline_p (node->callers)
1488 && !DECL_EXTERNAL (node->decl))
1490 cgraph_inline_failed_t reason;
1491 old_size = overall_size;
1492 if (dump_file)
1494 fprintf (dump_file,
1495 "\nConsidering %s size %i.\n",
1496 cgraph_node_name (node), node->global.size);
1497 fprintf (dump_file,
1498 " Called once from %s %i insns.\n",
1499 cgraph_node_name (node->callers->caller),
1500 node->callers->caller->global.size);
1503 if (cgraph_check_inline_limits (node->callers, &reason))
1505 struct cgraph_node *caller = node->callers->caller;
1506 cgraph_mark_inline_edge (node->callers, true, NULL);
1507 if (dump_file)
1508 fprintf (dump_file,
1509 " Inlined into %s which now has %i size"
1510 " for a net change of %+i size.\n",
1511 cgraph_node_name (caller),
1512 caller->global.size,
1513 overall_size - old_size);
1515 else
1517 if (dump_file)
1518 fprintf (dump_file,
1519 " Not inlining: %s.\n",
1520 cgraph_inline_failed_string (reason));
1526 /* Free ipa-prop structures if they are no longer needed. */
1527 if (flag_indirect_inlining)
1528 ipa_free_all_structures_after_iinln ();
1530 if (dump_file)
1531 fprintf (dump_file,
1532 "\nInlined %i calls, eliminated %i functions, "
1533 "size %i turned to %i size.\n\n",
1534 ncalls_inlined, nfunctions_inlined, initial_size,
1535 overall_size);
1536 free (order);
1537 return 0;
1540 /* Return true when N is a leaf function. Accept cheap builtins
1541 in leaf functions. */
1543 static bool
1544 leaf_node_p (struct cgraph_node *n)
1546 struct cgraph_edge *e;
1547 for (e = n->callees; e; e = e->next_callee)
1548 if (!is_inexpensive_builtin (e->callee->decl))
1549 return false;
1550 return true;
1553 /* Return true if the edge E is inlinable during early inlining. */
1555 static bool
1556 cgraph_edge_early_inlinable_p (struct cgraph_edge *e, FILE *file)
1558 if (!e->callee->local.inlinable)
1560 if (file)
1561 fprintf (file, "Not inlining: Function not inlinable.\n");
1562 return false;
1564 if (!e->callee->analyzed)
1566 if (file)
1567 fprintf (file, "Not inlining: Function body not available.\n");
1568 return false;
1570 if (!tree_can_inline_p (e)
1571 || e->call_stmt_cannot_inline_p)
1573 if (file)
1574 fprintf (file, "Not inlining: %s.\n",
1575 cgraph_inline_failed_string (e->inline_failed));
1576 return false;
1578 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
1579 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1581 if (file)
1582 fprintf (file, "Not inlining: not in SSA form.\n");
1583 return false;
1585 return true;
1588 /* Inline always-inline function calls in NODE. */
1590 static bool
1591 cgraph_perform_always_inlining (struct cgraph_node *node)
1593 struct cgraph_edge *e;
1594 bool inlined = false;
1596 for (e = node->callees; e; e = e->next_callee)
1598 if (!e->callee->local.disregard_inline_limits)
1599 continue;
1601 if (dump_file)
1602 fprintf (dump_file,
1603 "Considering always-inline candidate %s.\n",
1604 cgraph_node_name (e->callee));
1606 if (cgraph_edge_recursive_p (e))
1608 if (dump_file)
1609 fprintf (dump_file, "Not inlining: recursive call.\n");
1610 e->inline_failed = CIF_RECURSIVE_INLINING;
1611 continue;
1614 if (!cgraph_edge_early_inlinable_p (e, dump_file))
1615 continue;
1617 if (dump_file)
1618 fprintf (dump_file, " Inlining %s into %s.\n",
1619 cgraph_node_name (e->callee),
1620 cgraph_node_name (e->caller));
1621 cgraph_mark_inline_edge (e, true, NULL);
1622 inlined = true;
1625 return inlined;
1628 /* Decide on the inlining. We do so in topological order to avoid
1629 the expense of updating data structures. */
1631 static bool
1632 cgraph_decide_inlining_incrementally (struct cgraph_node *node)
1634 struct cgraph_edge *e;
1635 bool inlined = false;
1636 cgraph_inline_failed_t failed_reason;
1638 /* Never inline regular functions into always-inline functions
1639 during incremental inlining. */
1640 if (node->local.disregard_inline_limits)
1641 return false;
1643 for (e = node->callees; e; e = e->next_callee)
1645 int allowed_growth = 0;
1647 if (!e->callee->local.inlinable
1648 || !e->inline_failed
1649 || e->callee->local.disregard_inline_limits)
1650 continue;
1652 /* Do not consider functions not declared inline. */
1653 if (!DECL_DECLARED_INLINE_P (e->callee->decl)
1654 && !flag_inline_small_functions
1655 && !flag_inline_functions)
1656 continue;
1658 if (dump_file)
1659 fprintf (dump_file, "Considering inline candidate %s.\n",
1660 cgraph_node_name (e->callee));
1662 if (cgraph_edge_recursive_p (e))
1664 if (dump_file)
1665 fprintf (dump_file, "Not inlining: recursive call.\n");
1666 continue;
1669 if (!cgraph_edge_early_inlinable_p (e, dump_file))
1670 continue;
1672 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1673 && optimize_function_for_speed_p (cfun))
1674 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1676 /* When the function body would grow and inlining the function
1677 won't eliminate the need for an offline copy of the function,
1678 don't inline. */
1679 if (cgraph_estimate_edge_growth (e) > allowed_growth
1680 && cgraph_estimate_growth (e->callee) > allowed_growth)
1682 if (dump_file)
1683 fprintf (dump_file,
1684 "Not inlining: code size would grow by %i.\n",
1685 cgraph_estimate_edge_growth (e));
1686 continue;
1688 if (!cgraph_check_inline_limits (e, &e->inline_failed))
1690 if (dump_file)
1691 fprintf (dump_file, "Not inlining: %s.\n",
1692 cgraph_inline_failed_string (e->inline_failed));
1693 continue;
1695 if (cgraph_default_inline_p (e->callee, &failed_reason))
1697 if (dump_file)
1698 fprintf (dump_file, " Inlining %s into %s.\n",
1699 cgraph_node_name (e->callee),
1700 cgraph_node_name (e->caller));
1701 cgraph_mark_inline_edge (e, true, NULL);
1702 inlined = true;
1706 return inlined;
1709 /* Because inlining might remove no-longer-reachable nodes, we need to
1710 keep the array visible to the garbage collector to avoid reading
1711 collected-out nodes. */
1712 static int nnodes;
1713 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1715 /* Do inlining of small functions. Doing so early helps profiling and other
1716 passes to be somewhat more effective and avoids some code duplication in
1717 the later real inlining pass for testcases with very many function calls. */
1718 static unsigned int
1719 cgraph_early_inlining (void)
1721 struct cgraph_node *node = cgraph_node (current_function_decl);
1722 unsigned int todo = 0;
1723 int iterations = 0;
1724 bool inlined = false;
1726 if (seen_error ())
1727 return 0;
1729 #ifdef ENABLE_CHECKING
1730 verify_cgraph_node (node);
1731 #endif
1733 /* Even when not optimizing or not inlining, inline always-inline
1734 functions. */
1735 inlined = cgraph_perform_always_inlining (node);
1737 if (!optimize
1738 || flag_no_inline
1739 || !flag_early_inlining)
1741 else if (lookup_attribute ("flatten",
1742 DECL_ATTRIBUTES (node->decl)) != NULL)
1744 /* When the function is marked to be flattened, recursively inline
1745 all calls in it. */
1746 if (dump_file)
1747 fprintf (dump_file,
1748 "Flattening %s\n", cgraph_node_name (node));
1749 cgraph_flatten (node);
1750 inlined = true;
1752 else
1754 /* We iterate incremental inlining to get trivial cases of indirect
1755 inlining. */
1756 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1757 && cgraph_decide_inlining_incrementally (node))
1759 timevar_push (TV_INTEGRATION);
1760 todo |= optimize_inline_calls (current_function_decl);
1761 timevar_pop (TV_INTEGRATION);
1762 iterations++;
1763 inlined = false;
1765 if (dump_file)
1766 fprintf (dump_file, "Iterations: %i\n", iterations);
1769 if (inlined)
1771 timevar_push (TV_INTEGRATION);
1772 todo |= optimize_inline_calls (current_function_decl);
1773 timevar_pop (TV_INTEGRATION);
1776 cfun->always_inline_functions_inlined = true;
1778 return todo;
1781 struct gimple_opt_pass pass_early_inline =
1784 GIMPLE_PASS,
1785 "einline", /* name */
1786 NULL, /* gate */
1787 cgraph_early_inlining, /* execute */
1788 NULL, /* sub */
1789 NULL, /* next */
1790 0, /* static_pass_number */
1791 TV_INLINE_HEURISTICS, /* tv_id */
1792 PROP_ssa, /* properties_required */
1793 0, /* properties_provided */
1794 0, /* properties_destroyed */
1795 0, /* todo_flags_start */
1796 TODO_dump_func /* todo_flags_finish */
1801 /* See if the statement might disappear after inlining.
1802 0 - means not eliminated
1803 1 - half of the statements go away
1804 2 - for sure it is eliminated.
1805 We are not terribly sophisticated; basically we look for simple abstraction
1806 penalty wrappers. */
1808 static int
1809 eliminated_by_inlining_prob (gimple stmt)
1811 enum gimple_code code = gimple_code (stmt);
1812 switch (code)
1814 case GIMPLE_RETURN:
1815 return 2;
1816 case GIMPLE_ASSIGN:
1817 if (gimple_num_ops (stmt) != 2)
1818 return 0;
1820 /* Casts of parameters, loads from parameters passed by reference
1821 and stores to the return value or parameters are often free after
1822 inlining due to SRA and further combining.
1823 Assume that half of the statements go away. */
1824 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1825 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1826 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1827 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1829 tree rhs = gimple_assign_rhs1 (stmt);
1830 tree lhs = gimple_assign_lhs (stmt);
1831 tree inner_rhs = rhs;
1832 tree inner_lhs = lhs;
1833 bool rhs_free = false;
1834 bool lhs_free = false;
1836 while (handled_component_p (inner_lhs)
1837 || TREE_CODE (inner_lhs) == MEM_REF)
1838 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1839 while (handled_component_p (inner_rhs)
1840 || TREE_CODE (inner_rhs) == ADDR_EXPR
1841 || TREE_CODE (inner_rhs) == MEM_REF)
1842 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1845 if (TREE_CODE (inner_rhs) == PARM_DECL
1846 || (TREE_CODE (inner_rhs) == SSA_NAME
1847 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1848 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1849 rhs_free = true;
1850 if (rhs_free && is_gimple_reg (lhs))
1851 lhs_free = true;
1852 if (((TREE_CODE (inner_lhs) == PARM_DECL
1853 || (TREE_CODE (inner_lhs) == SSA_NAME
1854 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1855 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1856 && inner_lhs != lhs)
1857 || TREE_CODE (inner_lhs) == RESULT_DECL
1858 || (TREE_CODE (inner_lhs) == SSA_NAME
1859 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1860 lhs_free = true;
1861 if (lhs_free
1862 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1863 rhs_free = true;
1864 if (lhs_free && rhs_free)
1865 return 1;
1867 return 0;
1868 default:
1869 return 0;
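
To make the heuristic concrete, here is a hypothetical abstraction-penalty wrapper (an assumed example, not taken from this file): the load through the parameter pointer is scored with probability 1 and the return statement with probability 2, which is exactly the kind of code this function is tuned to recognize.

/* Illustrative sketch only: after inlining get_x, SRA and forward
   propagation typically remove the parameter load and the return copy.  */
struct point { int x; int y; };

static int
get_x (const struct point *p)
{
  return p->x;  /* load from a parameter passed by reference: prob 1;
                   the GIMPLE_RETURN itself: prob 2.  */
}

int
point_x_plus_one (const struct point *p)
{
  return get_x (p) + 1;
}
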
1873 /* Compute function body size parameters for NODE. */
1875 static void
1876 estimate_function_body_sizes (struct cgraph_node *node)
1878 gcov_type time = 0;
1879 gcov_type time_inlining_benefit = 0;
1880 /* Estimate static overhead for function prologue/epilogue and alignment. */
1881 int size = 2;
1882 /* Benefits are scaled by the probability of elimination, which is in the
1883 range <0,2>. */
1884 int size_inlining_benefit = 2 * 2;
1885 basic_block bb;
1886 gimple_stmt_iterator bsi;
1887 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1888 int freq;
1890 if (dump_file)
1891 fprintf (dump_file, "Analyzing function body size: %s\n",
1892 cgraph_node_name (node));
1894 gcc_assert (my_function && my_function->cfg);
1895 FOR_EACH_BB_FN (bb, my_function)
1897 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1898 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1900 gimple stmt = gsi_stmt (bsi);
1901 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1902 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1903 int prob;
1905 if (dump_file && (dump_flags & TDF_DETAILS))
1907 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1908 freq, this_size, this_time);
1909 print_gimple_stmt (dump_file, stmt, 0, 0);
1911 this_time *= freq;
1912 time += this_time;
1913 size += this_size;
1914 prob = eliminated_by_inlining_prob (stmt);
1915 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
1916 fprintf (dump_file, " 50%% will be eliminated by inlining\n");
1917 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
1918 fprintf (dump_file, " will be eliminated by inlining\n");
1919 size_inlining_benefit += this_size * prob;
1920 time_inlining_benefit += this_time * prob;
1921 gcc_assert (time >= 0);
1922 gcc_assert (size >= 0);
1925 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1926 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE)
1927 / (CGRAPH_FREQ_BASE * 2));
1928 size_inlining_benefit = (size_inlining_benefit + 1) / 2;
1929 if (time_inlining_benefit > MAX_TIME)
1930 time_inlining_benefit = MAX_TIME;
1931 if (time > MAX_TIME)
1932 time = MAX_TIME;
1933 if (dump_file)
1934 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1935 (int)time, (int)time_inlining_benefit,
1936 size, size_inlining_benefit);
1937 inline_summary (node)->self_time = time;
1938 inline_summary (node)->self_size = size;
1939 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1940 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
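
The scaling at the end of the loop is easiest to follow on a small worked example. The stand-alone sketch below is not part of GCC; it assumes CGRAPH_FREQ_BASE is 1000 (its usual value in this tree) and reproduces the arithmetic for a single statement of size 3 and time 2 in a block of frequency CGRAPH_FREQ_BASE with elimination probability 1.

/* Stand-alone sketch (not GCC code) of the rounding and scaling used in
   estimate_function_body_sizes, assuming CGRAPH_FREQ_BASE == 1000.  */
#include <stdio.h>

#define CGRAPH_FREQ_BASE 1000

int
main (void)
{
  long time = 0, time_benefit = 0;
  /* size starts at 2 for prologue/epilogue; the benefit starts at 2 * 2
     because that overhead is assumed always eliminated (probability 2).  */
  int size = 2, size_benefit = 2 * 2;

  /* One statement: size 3, time 2, block frequency CGRAPH_FREQ_BASE,
     elimination probability 1 ("half goes away").  */
  int this_size = 3, this_time = 2, freq = CGRAPH_FREQ_BASE, prob = 1;

  this_time *= freq;
  time += this_time;
  size += this_size;
  size_benefit += this_size * prob;
  time_benefit += this_time * prob;

  time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
  time_benefit = (time_benefit + CGRAPH_FREQ_BASE) / (CGRAPH_FREQ_BASE * 2);
  size_benefit = (size_benefit + 1) / 2;

  /* Prints: time 2 benefit 1 size 5 benefit 4  */
  printf ("time %ld benefit %ld size %d benefit %d\n",
          time, time_benefit, size, size_benefit);
  return 0;
}
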
1943 /* Compute parameters of functions used by inliner. */
1944 void
1945 compute_inline_parameters (struct cgraph_node *node)
1947 HOST_WIDE_INT self_stack_size;
1948 struct cgraph_edge *e;
1950 gcc_assert (!node->global.inlined_to);
1952 /* Estimate the stack size for the function if we're optimizing. */
1953 self_stack_size = optimize ? estimated_stack_frame_size (node) : 0;
1954 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1955 node->global.estimated_stack_size = self_stack_size;
1956 node->global.stack_frame_offset = 0;
1958 /* Can this function be inlined at all? */
1959 node->local.inlinable = tree_inlinable_function_p (node->decl);
1960 if (!node->local.inlinable)
1961 node->local.disregard_inline_limits = 0;
1963 /* Inlinable functions can always change their signature. */
1964 if (node->local.inlinable)
1965 node->local.can_change_signature = true;
1966 else
1968 /* Functions calling __builtin_apply_args cannot change their signature. */
1969 for (e = node->callees; e; e = e->next_callee)
1970 if (DECL_BUILT_IN (e->callee->decl)
1971 && DECL_BUILT_IN_CLASS (e->callee->decl) == BUILT_IN_NORMAL
1972 && DECL_FUNCTION_CODE (e->callee->decl) == BUILT_IN_APPLY_ARGS)
1973 break;
1974 node->local.can_change_signature = !e;
1976 estimate_function_body_sizes (node);
1977 /* Compute the size of call statements. We have to do this for our callers
1978 here, because those sizes need to be present for edges _to_ us as soon as
1979 we are finished with the early optimizations. */
1980 for (e = node->callers; e; e = e->next_caller)
1981 if (e->call_stmt)
1983 e->call_stmt_size
1984 = estimate_num_insns (e->call_stmt, &eni_size_weights);
1985 e->call_stmt_time
1986 = estimate_num_insns (e->call_stmt, &eni_time_weights);
1988 /* Inlining characteristics are maintained by cgraph_mark_inline. */
1989 node->global.time = inline_summary (node)->self_time;
1990 node->global.size = inline_summary (node)->self_size;
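
The can_change_signature test above keys off callees of __builtin_apply_args. Below is a hypothetical user-level example (illustrative only, not part of this file) of a function that must therefore keep its signature, since it reads the raw incoming argument block.

/* Illustrative sketch only: because this forwarder uses
   __builtin_apply_args, the inliner must not change its signature.  */
extern double target_fn (double, double);

double
forward_to_target (double a, double b)
{
  void *args = __builtin_apply_args ();
  void *result = __builtin_apply ((void (*) ()) target_fn, args, 64);
  __builtin_return (result);
}
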
1994 /* Compute parameters of functions used by inliner using
1995 current_function_decl. */
1996 static unsigned int
1997 compute_inline_parameters_for_current (void)
1999 compute_inline_parameters (cgraph_node (current_function_decl));
2000 return 0;
2003 struct gimple_opt_pass pass_inline_parameters =
2006 GIMPLE_PASS,
2007 "inline_param", /* name */
2008 NULL, /* gate */
2009 compute_inline_parameters_for_current,/* execute */
2010 NULL, /* sub */
2011 NULL, /* next */
2012 0, /* static_pass_number */
2013 TV_INLINE_HEURISTICS, /* tv_id */
2014 0, /* properties_required */
2015 0, /* properties_provided */
2016 0, /* properties_destroyed */
2017 0, /* todo_flags_start */
2018 0 /* todo_flags_finish */
2022 /* This function performs the intraprocedural analysis of NODE that is
2023 required to inline indirect calls. */
2024 static void
2025 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
2027 ipa_analyze_node (node);
2028 if (dump_file && (dump_flags & TDF_DETAILS))
2030 ipa_print_node_params (dump_file, node);
2031 ipa_print_node_jump_functions (dump_file, node);
2035 /* Analyze the body of NODE and compute its inline parameters. */
2036 static void
2037 analyze_function (struct cgraph_node *node)
2039 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2040 current_function_decl = node->decl;
2042 compute_inline_parameters (node);
2043 /* FIXME: We should remove the optimize check after we ensure we never run
2044 IPA passes when not optimizing. */
2045 if (flag_indirect_inlining && optimize)
2046 inline_indirect_intraprocedural_analysis (node);
2048 current_function_decl = NULL;
2049 pop_cfun ();
2052 /* Called when new function is inserted to callgraph late. */
2053 static void
2054 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
2056 analyze_function (node);
2059 /* Compute inline summaries for all analyzed functions in the callgraph. */
2060 static void
2061 inline_generate_summary (void)
2063 struct cgraph_node *node;
2065 function_insertion_hook_holder =
2066 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2068 if (flag_indirect_inlining)
2070 ipa_register_cgraph_hooks ();
2071 ipa_check_create_node_params ();
2072 ipa_check_create_edge_args ();
2075 for (node = cgraph_nodes; node; node = node->next)
2076 if (node->analyzed)
2077 analyze_function (node);
2079 return;
2082 /* Apply inline plan to function. */
2083 static unsigned int
2084 inline_transform (struct cgraph_node *node)
2086 unsigned int todo = 0;
2087 struct cgraph_edge *e;
2088 bool inline_p = false;
2090 /* FIXME: Currently the pass manager adds the inline transform more than once to some
2091 clones. This needs revisiting after the WPA cleanups. */
2092 if (cfun->after_inlining)
2093 return 0;
2095 /* We might need the body of this function so that we can expand
2096 it inline somewhere else. */
2097 if (cgraph_preserve_function_body_p (node->decl))
2098 save_inline_function_body (node);
2100 for (e = node->callees; e; e = e->next_callee)
2102 cgraph_redirect_edge_call_stmt_to_callee (e);
2103 if (!e->inline_failed || warn_inline)
2104 inline_p = true;
2107 if (inline_p)
2109 timevar_push (TV_INTEGRATION);
2110 todo = optimize_inline_calls (current_function_decl);
2111 timevar_pop (TV_INTEGRATION);
2113 cfun->always_inline_functions_inlined = true;
2114 cfun->after_inlining = true;
2115 return todo | execute_fixup_cfg ();
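
Note that the loop above sets inline_p not only when an edge was actually inlined but also whenever warn_inline is set, so optimize_inline_calls still runs and its inlining machinery can report why a call was not inlined. A hypothetical translation unit exercising that path (illustrative only; it assumes the body is large enough to exceed the inline limits when compiled with -O2 -Winline):

/* Illustrative sketch only: if huge's estimated size exceeds the inliner's
   growth limits, -Winline reports why the call in caller was not inlined;
   inline_transform still visits caller because warn_inline is set.  */
inline int
huge (int x)
{
  int s = 0, i;
  for (i = 0; i < 64; i++)
    s += (x + i) * (x - i) + (s ^ i);
  return s;
}

int
caller (int x)
{
  return huge (x);
}
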
2118 /* Read the inline summary. Jump functions are shared between ipa-cp
2119 and the inliner, so when ipa-cp is active, we don't need to read them
2120 twice. */
2122 static void
2123 inline_read_summary (void)
2125 if (flag_indirect_inlining)
2127 ipa_register_cgraph_hooks ();
2128 if (!flag_ipa_cp)
2129 ipa_prop_read_jump_functions ();
2131 function_insertion_hook_holder =
2132 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2135 /* Write the inline summary for the nodes in SET.
2136 Jump functions are shared between ipa-cp and the inliner, so when ipa-cp is
2137 active, we don't need to write them twice. */
2139 static void
2140 inline_write_summary (cgraph_node_set set,
2141 varpool_node_set vset ATTRIBUTE_UNUSED)
2143 if (flag_indirect_inlining && !flag_ipa_cp)
2144 ipa_prop_write_jump_functions (set);
2147 /* When to run IPA inlining. Inlining of always-inline functions
2148 happens during early inlining. */
2150 static bool
2151 gate_cgraph_decide_inlining (void)
2153 /* ??? We'd like to skip this if we are not optimizing or not inlining, as
2154 all always-inline functions have already been processed by early
2155 inlining. But skipping it breaks EH with C++, because we need to
2156 unconditionally run fixup_cfg even at -O0.
2157 So leave it enabled unconditionally for now. */
2158 return 1;
2161 struct ipa_opt_pass_d pass_ipa_inline =
2164 IPA_PASS,
2165 "inline", /* name */
2166 gate_cgraph_decide_inlining, /* gate */
2167 cgraph_decide_inlining, /* execute */
2168 NULL, /* sub */
2169 NULL, /* next */
2170 0, /* static_pass_number */
2171 TV_INLINE_HEURISTICS, /* tv_id */
2172 0, /* properties_required */
2173 0, /* properties_provided */
2174 0, /* properties_destroyed */
2175 TODO_remove_functions, /* todo_flags_start */
2176 TODO_dump_cgraph | TODO_dump_func
2177 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2179 inline_generate_summary, /* generate_summary */
2180 inline_write_summary, /* write_summary */
2181 inline_read_summary, /* read_summary */
2182 NULL, /* write_optimization_summary */
2183 NULL, /* read_optimization_summary */
2184 NULL, /* stmt_fixup */
2185 0, /* TODOs */
2186 inline_transform, /* function_transform */
2187 NULL, /* variable_transform */
2191 #include "gt-ipa-inline.h"