1 /* Inlining decision heuristics.
2 Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Jan Hubicka
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* Inlining decision heuristics
24    We separate inlining decisions from the inliner itself and store them
25    inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
26    documentation for the particular representation of inline plans in the
27    callgraph.
29 There are three major parts of this file:
31 cgraph_mark_inline_edge implementation
33      This function marks the given call as inlined and performs the necessary
34      modifications of the cgraph (production of the clones and updating overall
35      statistics)
37 inlining heuristics limits
39      These functions check that a particular inlining is allowed by the
40      limits specified by the user (allowed function growth, overall unit
41      growth and so on).
43 inlining heuristics
45      This is the implementation of the IPA pass aiming to get as much benefit
46      from inlining as possible while obeying the limits checked above.
48      The implementation of the particular heuristics is separated from
49      the rest of the code to make it easier to replace it with a more
50      sophisticated implementation in the future.  The rest of the inlining code
51      acts as a library aimed at modifying the callgraph and verifying that the
52      parameters on code size growth fit.
54      To mark a given call inlined, use the cgraph_mark_inline function;
55      verification is performed by cgraph_default_inline_p and
56      cgraph_check_inline_limits.
58      The heuristics implement a simple knapsack-style algorithm, ordering
59      all functions by their "profitability" (estimated by code size growth)
60      and inlining them in priority order.
62      cgraph_decide_inlining implements heuristics taking the whole callgraph
63      into account, while cgraph_decide_inlining_incrementally considers
64      only one function at a time and is used by the early inliner.
66 The inliner itself is split into several passes:
68 pass_inline_parameters
70      This pass computes local properties of functions that are used by the
71      inliner: estimated function body size, whether the function is inlinable
72      at all, and stack frame consumption.
74      Before executing any of the inliner passes, this local pass has to be
75      applied to each function in the callgraph (i.e. run as a subpass of some
76      earlier IPA pass).  The results are made out of date by any optimization
77      applied to the function body.
79 pass_early_inlining
81      A simple local inlining pass that inlines callees into the current
82      function.  This pass makes no global whole-compilation-unit analysis, and
83      thus when allowed to do inlining that expands code size it might result in
84      unbounded growth of the whole unit.
86      The pass is run during conversion into SSA form.  Only functions already
87      converted into SSA form are inlined, so the conversion must happen in
88      topological order on the callgraph (which is maintained by the pass manager).
89      The functions are early-optimized after inlining, so the early inliner sees
90      the unoptimized function itself, but all considered callees are already
91      optimized, allowing it to unfold the C++ abstraction penalty effectively and
92      cheaply.
94 pass_ipa_inline
96      This is the main pass, implementing a simple greedy algorithm that inlines
97      small functions (which may result in overall growth of the compilation
98      unit) and functions called once.  The pass computes just the so-called
99      inline plan (a representation of the inlining to be done in the callgraph)
100     and, unlike early inlining, it does not perform the inlining itself.  */
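/* Editorial illustration (not part of the original file; the function names
   are made up): the user-visible knobs feeding the passes above.  point_x is
   a typical early-inlining candidate, while the flatten attribute on sum_x
   asks the inliner to inline its entire call tree (see cgraph_flatten below):

     struct point { int x, y; };
     static inline int point_x (const struct point *p) { return p->x; }

     __attribute__ ((flatten)) int
     sum_x (const struct point *p, int n)
     {
       int i, sum = 0;
       for (i = 0; i < n; i++)
         sum += point_x (&p[i]);
       return sum;
     }  */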
103 #include "config.h"
104 #include "system.h"
105 #include "coretypes.h"
106 #include "tm.h"
107 #include "tree.h"
108 #include "tree-inline.h"
109 #include "langhooks.h"
110 #include "flags.h"
111 #include "cgraph.h"
112 #include "diagnostic.h"
113 #include "gimple-pretty-print.h"
114 #include "timevar.h"
115 #include "params.h"
116 #include "fibheap.h"
117 #include "intl.h"
118 #include "tree-pass.h"
119 #include "hashtab.h"
120 #include "coverage.h"
121 #include "ggc.h"
122 #include "tree-flow.h"
123 #include "rtl.h"
124 #include "ipa-prop.h"
125 #include "except.h"
127 #define MAX_TIME 1000000000
129 /* Modes the incremental inliner operates on:
131    In ALWAYS_INLINE mode, only functions marked
132    always_inline are inlined.  This mode is used after detecting a cycle during
133    flattening.
135    In SIZE mode, only functions that reduce function body size after inlining
136    are inlined; this is used during early inlining.
138    In ALL mode, everything is inlined.  This is used during flattening.  */
139 enum inlining_mode {
140 INLINE_NONE = 0,
141 INLINE_ALWAYS_INLINE,
142 INLINE_SIZE_NORECURSIVE,
143 INLINE_SIZE,
144 INLINE_ALL
147 static bool
148 cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
149 static void cgraph_flatten (struct cgraph_node *node);
152 /* Statistics we collect about inlining algorithm. */
153 static int ncalls_inlined;
154 static int nfunctions_inlined;
155 static int overall_size;
156 static gcov_type max_count, max_benefit;
158 /* Holders of ipa cgraph hooks: */
159 static struct cgraph_node_hook_list *function_insertion_hook_holder;
161 static inline struct inline_summary *
162 inline_summary (struct cgraph_node *node)
164 return &node->local.inline_summary;
167 /* Estimate self time of the function after inlining WHAT into TO. */
169 static int
170 cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
171 struct cgraph_node *what)
173 gcov_type time = (((gcov_type)what->global.time
174 - inline_summary (what)->time_inlining_benefit)
175 * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
176 + to->global.time;
177 if (time < 0)
178 time = 0;
179 if (time > MAX_TIME)
180 time = MAX_TIME;
181 return time;
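/* Worked example (editorial): if WHAT has time 100 and
   time_inlining_benefit 40, a call site with frequency CGRAPH_FREQ_BASE / 2
   adds about (100 - 40) / 2 = 30 on top of TO's own time.  */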
184 /* Estimate self size of the function after inlining WHAT into TO. */
186 static inline int
187 cgraph_estimate_size_after_inlining (struct cgraph_node *to,
188 struct cgraph_node *what)
190 int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
191 + to->global.size);
192 gcc_assert (size >= 0);
193 return size;
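/* Worked example (editorial): if TO has size 20 and WHAT has size 10 with
   size_inlining_benefit 3, the estimate is 20 + (10 - 3) = 27.  */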
196 /* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
197 by NEST. */
199 static void
200 update_noncloned_frequencies (struct cgraph_node *node,
201 int freq_scale, int nest)
203 struct cgraph_edge *e;
205 /* We do not want to ignore high loop nest after freq drops to 0. */
206 if (!freq_scale)
207 freq_scale = 1;
208 for (e = node->callees; e; e = e->next_callee)
210 e->loop_nest += nest;
211 e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
212 if (e->frequency > CGRAPH_FREQ_MAX)
213 e->frequency = CGRAPH_FREQ_MAX;
214 if (!e->inline_failed)
215 update_noncloned_frequencies (e->callee, freq_scale, nest);
219 /* E is expected to be an edge being inlined. Clone destination node of
220 the edge and redirect it to the new clone.
221 DUPLICATE is used for bookkeeping on whether we are actually creating new
222    clones or re-using the node originally representing the out-of-line
       function call.  */
224 void
225 cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
226 bool update_original)
228 HOST_WIDE_INT peak;
230 if (duplicate)
232 /* We may eliminate the need for out-of-line copy to be output.
233 In that case just go ahead and re-use it. */
234 if (!e->callee->callers->next_caller
235 /* FIXME: When address is taken of DECL_EXTERNAL function we still can remove its
236 offline copy, but we would need to keep unanalyzed node in the callgraph so
237 references can point to it. */
238 && !e->callee->address_taken
239 && cgraph_can_remove_if_no_direct_calls_p (e->callee)
240 /* Inlining might enable more devirtualizing, so we want to remove
241 those only after all devirtualizable virtual calls are processed.
242 Lacking may edges in callgraph we just preserve them post
243 inlining. */
244 && (!DECL_VIRTUAL_P (e->callee->decl)
245 || (!DECL_COMDAT (e->callee->decl) && !DECL_EXTERNAL (e->callee->decl)))
246 /* Don't reuse if more than one function shares a comdat group.
247 If the other function(s) are needed, we need to emit even
248 this function out of line. */
249 && !e->callee->same_comdat_group
250 && !cgraph_new_nodes)
252 gcc_assert (!e->callee->global.inlined_to);
253 if (e->callee->analyzed && !DECL_EXTERNAL (e->callee->decl))
255 overall_size -= e->callee->global.size;
256 nfunctions_inlined++;
258 duplicate = false;
259 e->callee->local.externally_visible = false;
260 update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
262 else
264 struct cgraph_node *n;
265 n = cgraph_clone_node (e->callee, e->callee->decl,
266 e->count, e->frequency, e->loop_nest,
267 update_original, NULL);
268 cgraph_redirect_edge_callee (e, n);
272 if (e->caller->global.inlined_to)
273 e->callee->global.inlined_to = e->caller->global.inlined_to;
274 else
275 e->callee->global.inlined_to = e->caller;
276 e->callee->global.stack_frame_offset
277 = e->caller->global.stack_frame_offset
278 + inline_summary (e->caller)->estimated_self_stack_size;
279 peak = e->callee->global.stack_frame_offset
280 + inline_summary (e->callee)->estimated_self_stack_size;
281 if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
282 e->callee->global.inlined_to->global.estimated_stack_size = peak;
283 cgraph_propagate_frequency (e->callee);
285 /* Recursively clone all bodies. */
286 for (e = e->callee->callees; e; e = e->next_callee)
287 if (!e->inline_failed)
288 cgraph_clone_inlined_nodes (e, duplicate, update_original);
291 /* Mark edge E as inlined and update callgraph accordingly. UPDATE_ORIGINAL
292 specify whether profile of original function should be updated. If any new
293 indirect edges are discovered in the process, add them to NEW_EDGES, unless
294 it is NULL. Return true iff any new callgraph edges were discovered as a
295 result of inlining. */
297 static bool
298 cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
299 VEC (cgraph_edge_p, heap) **new_edges)
301 int old_size = 0, new_size = 0;
302 struct cgraph_node *to = NULL, *what;
303 struct cgraph_edge *curr = e;
304 int freq;
306 gcc_assert (e->inline_failed);
307 e->inline_failed = CIF_OK;
308 DECL_POSSIBLY_INLINED (e->callee->decl) = true;
310 cgraph_clone_inlined_nodes (e, true, update_original);
312 what = e->callee;
314 freq = e->frequency;
315 /* Now update size of caller and all functions caller is inlined into. */
316 for (;e && !e->inline_failed; e = e->caller->callers)
318 to = e->caller;
319 old_size = e->caller->global.size;
320 new_size = cgraph_estimate_size_after_inlining (to, what);
321 to->global.size = new_size;
322 to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
324 gcc_assert (what->global.inlined_to == to);
325 if (new_size > old_size)
326 overall_size += new_size - old_size;
327 ncalls_inlined++;
329 /* FIXME: We should remove the optimize check after we ensure we never run
330      IPA passes when not optimizing.  */
331 if (flag_indirect_inlining && optimize)
332 return ipa_propagate_indirect_call_infos (curr, new_edges);
333 else
334 return false;
337 /* Estimate the growth caused by inlining NODE into all callees. */
339 static int
340 cgraph_estimate_growth (struct cgraph_node *node)
342 int growth = 0;
343 struct cgraph_edge *e;
344 bool self_recursive = false;
346 if (node->global.estimated_growth != INT_MIN)
347 return node->global.estimated_growth;
349 for (e = node->callers; e; e = e->next_caller)
351 if (e->caller == node)
352 self_recursive = true;
353 if (e->inline_failed)
354 growth += (cgraph_estimate_size_after_inlining (e->caller, node)
355 - e->caller->global.size);
358   /* ??? Wrong for non-trivially self-recursive functions or cases where
359      we decide not to inline for different reasons, but it is not a big deal
360      as in that case we will keep the body around, but we will also avoid
361      some inlining.  */
362 if (cgraph_will_be_removed_from_program_if_no_direct_calls (node)
363 && !DECL_EXTERNAL (node->decl) && !self_recursive)
364 growth -= node->global.size;
365 /* COMDAT functions are very often not shared across multiple units since they
366 come from various template instantiations. Take this into account. */
367 else if (DECL_COMDAT (node->decl) && !self_recursive
368 && cgraph_can_remove_if_no_direct_calls_p (node))
369 growth -= (node->global.size
370 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY)) + 50) / 100;
372 node->global.estimated_growth = growth;
373 return growth;
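/* Worked example (editorial): a callee of size 10 with
   size_inlining_benefit 2 and three call sites yields 3 * (10 - 2) = 24;
   if the offline body can be removed afterwards, the estimate drops by
   another 10, to 14.  */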
376 /* Return false when inlining WHAT into TO is not a good idea,
377    as it would cause too large a growth of function bodies.
378    When ONE_ONLY is true, assume that only one call site is going
379    to be inlined, otherwise figure out how many call sites in
380    TO call WHAT and verify that all can be inlined.  */
383 static bool
384 cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
385 cgraph_inline_failed_t *reason)
387 int newsize;
388 int limit;
389 HOST_WIDE_INT stack_size_limit, inlined_stack;
391 if (to->global.inlined_to)
392 to = to->global.inlined_to;
394 /* When inlining large function body called once into small function,
395 take the inlined function as base for limiting the growth. */
396 if (inline_summary (to)->self_size > inline_summary(what)->self_size)
397 limit = inline_summary (to)->self_size;
398 else
399 limit = inline_summary (what)->self_size;
401 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
403 /* Check the size after inlining against the function limits. But allow
404 the function to shrink if it went over the limits by forced inlining. */
405 newsize = cgraph_estimate_size_after_inlining (to, what);
406 if (newsize >= to->global.size
407 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
408 && newsize > limit)
410 if (reason)
411 *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
412 return false;
415 stack_size_limit = inline_summary (to)->estimated_self_stack_size;
417 stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;
419 inlined_stack = (to->global.stack_frame_offset
420 + inline_summary (to)->estimated_self_stack_size
421 + what->global.estimated_stack_size);
422 if (inlined_stack > stack_size_limit
423 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
425 if (reason)
426 *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
427 return false;
429 return true;
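/* Worked example (editorial): with self sizes 100 (TO) and 40 (WHAT) and
   --param large-function-growth=100, the limit is 100 + 100 * 100 / 100
   = 200; the inlining is rejected only if the estimated new size does not
   shrink and exceeds both this limit and --param large-function-insns.  */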
432 /* Return true when function N is small enough to be inlined. */
434 static bool
435 cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
437 tree decl = n->decl;
439 if (n->local.disregard_inline_limits)
440 return true;
442 if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
444 if (reason)
445 *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
446 return false;
448 if (!n->analyzed)
450 if (reason)
451 *reason = CIF_BODY_NOT_AVAILABLE;
452 return false;
454 if (cgraph_function_body_availability (n) <= AVAIL_OVERWRITABLE)
456 if (reason)
457 *reason = CIF_OVERWRITABLE;
458 return false;
462 if (DECL_DECLARED_INLINE_P (decl))
464 if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
466 if (reason)
467 *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
468 return false;
471 else
473 if (n->global.size >= MAX_INLINE_INSNS_AUTO)
475 if (reason)
476 *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
477 return false;
481 return true;
484 /* Return true when inlining WHAT would create recursive inlining.
485 We call recursive inlining all cases where same function appears more than
486 once in the single recursion nest path in the inline graph. */
488 static inline bool
489 cgraph_recursive_inlining_p (struct cgraph_node *to,
490 struct cgraph_node *what,
491 cgraph_inline_failed_t *reason)
493 bool recursive;
494 if (to->global.inlined_to)
495 recursive = what->decl == to->global.inlined_to->decl;
496 else
497 recursive = what->decl == to->decl;
498   /* Marking a recursive function inline has sane semantics and thus we should
499      not warn on it.  */
500 if (recursive && reason)
501 *reason = (what->local.disregard_inline_limits
502 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
503 return recursive;
506 /* A cost model driving the inlining heuristics in such a way that edges with
507    the smallest badness are inlined first.  After each inlining is performed
508    the costs of all caller edges of the nodes affected are recomputed so the
509    metrics may accurately depend on values such as the number of inlinable
510    callers of the function or the function body size.  */
512 static int
513 cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
515 gcov_type badness;
516 int growth =
517 (cgraph_estimate_size_after_inlining (edge->caller, edge->callee)
518 - edge->caller->global.size);
520 if (edge->callee->local.disregard_inline_limits)
521 return INT_MIN;
523 if (dump)
525       fprintf (dump_file, "    Badness calculation for %s -> %s\n",
526 cgraph_node_name (edge->caller),
527 cgraph_node_name (edge->callee));
528 fprintf (dump_file, " growth %i, time %i-%i, size %i-%i\n",
529 growth,
530 edge->callee->global.time,
531 inline_summary (edge->callee)->time_inlining_benefit,
532 edge->callee->global.size,
533 inline_summary (edge->callee)->size_inlining_benefit);
536 /* Always prefer inlining saving code size. */
537 if (growth <= 0)
539 badness = INT_MIN - growth;
540 if (dump)
541 fprintf (dump_file, " %i: Growth %i < 0\n", (int) badness,
542 growth);
545   /* When profiling is available, base priorities on -(#calls / growth).
546      So we optimize for the overall number of "executed" inlined calls.  */
547 else if (max_count)
549 badness =
550 ((int)
551 ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
552 (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
553 if (dump)
555 fprintf (dump_file,
556 " %i (relative %f): profile info. Relative count %f"
557 " * Relative benefit %f\n",
558 (int) badness, (double) badness / INT_MIN,
559 (double) edge->count / max_count,
560 (double) (inline_summary (edge->callee)->
561 time_inlining_benefit + 1) / (max_benefit + 1));
565   /* When the function local profile is available, base priorities on
566      growth / frequency, so we optimize for the overall frequency of inlined
567      calls.  This is not too accurate since while the call might be frequent
568      within the function, the function itself is infrequent.
570      Another objective to optimize for is the number of different calls inlined.
571      We add the estimated growth after inlining all functions to bias the
572      priorities slightly in this direction (so that less frequently called
573      functions of the same size get priority).  */
574 else if (flag_guess_branch_prob)
576 int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
577 int benefitperc;
578 int growth_for_all;
579 badness = growth * 10000;
580 benefitperc =
581 MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
582 (edge->callee->global.time + 1) +1, 100);
583 div *= benefitperc;
586 /* Decrease badness if call is nested. */
587 /* Compress the range so we don't overflow. */
588 if (div > 10000)
589 div = 10000 + ceil_log2 (div) - 8;
590 if (div < 1)
591 div = 1;
592 if (badness > 0)
593 badness /= div;
594 growth_for_all = cgraph_estimate_growth (edge->callee);
595 badness += growth_for_all;
596 if (badness > INT_MAX)
597 badness = INT_MAX;
598 if (dump)
600 fprintf (dump_file,
601 " %i: guessed profile. frequency %i, overall growth %i,"
602 " benefit %i%%, divisor %i\n",
603 (int) badness, edge->frequency, growth_for_all, benefitperc, div);
606 /* When function local profile is not available or it does not give
607 useful information (ie frequency is zero), base the cost on
608 loop nest and overall size growth, so we optimize for overall number
609 of functions fully inlined in program. */
610 else
612 int nest = MIN (edge->loop_nest, 8);
613 badness = cgraph_estimate_growth (edge->callee) * 256;
615 /* Decrease badness if call is nested. */
616 if (badness > 0)
617 badness >>= nest;
618 else
620 badness <<= nest;
622 if (dump)
623 fprintf (dump_file, " %i: no profile. nest %i\n", (int) badness,
624 nest);
627 /* Ensure that we did not overflow in all the fixed point math above. */
628 gcc_assert (badness >= INT_MIN);
629 gcc_assert (badness <= INT_MAX - 1);
630 /* Make recursive inlining happen always after other inlining is done. */
631 if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
632 return badness + 1;
633 else
634 return badness;
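/* Editorial summary of the cases above (ignoring the overflow clamping):

     profile feedback: badness ~ -(count / max_count)
                                 * (benefit / max_benefit) / growth
     guessed profile:  badness ~ growth * 10000 / (frequency * benefit%)
                                 + overall growth
     no profile:       badness ~ overall growth * 256 >> loop_nest  */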
637 /* Recompute badness of EDGE and update its key in HEAP if needed. */
638 static void
639 update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
641 int badness = cgraph_edge_badness (edge, false);
642 if (edge->aux)
644 fibnode_t n = (fibnode_t) edge->aux;
645 gcc_checking_assert (n->data == edge);
647       /* fibheap_replace_key only decreases the keys.
648          When we increase the key we do not update the heap
649          and instead re-insert the element once it becomes
650          a minimum of the heap.  */
651 if (badness < n->key)
653 fibheap_replace_key (heap, n, badness);
654 gcc_checking_assert (n->key == badness);
657 else
658 edge->aux = fibheap_insert (heap, badness, edge);
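/* Editorial note: this is the usual lazy-update idiom for heaps whose
   replace-key operation can only decrease keys; keys that became stale
   are detected at extraction time in
   cgraph_decide_inlining_of_small_functions and re-inserted with their
   current badness.  */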
661 /* Recompute heap nodes for each caller edge.  */
663 static void
664 update_caller_keys (fibheap_t heap, struct cgraph_node *node,
665 bitmap updated_nodes)
667 struct cgraph_edge *edge;
668 cgraph_inline_failed_t failed_reason;
670 if (!node->local.inlinable
671 || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
672 || node->global.inlined_to)
673 return;
674 if (!bitmap_set_bit (updated_nodes, node->uid))
675 return;
676 node->global.estimated_growth = INT_MIN;
678 /* See if there is something to do. */
679 for (edge = node->callers; edge; edge = edge->next_caller)
680 if (edge->inline_failed)
681 break;
682 if (!edge)
683 return;
684 /* Prune out edges we won't inline into anymore. */
685 if (!cgraph_default_inline_p (node, &failed_reason))
687 for (; edge; edge = edge->next_caller)
688 if (edge->aux)
690 fibheap_delete_node (heap, (fibnode_t) edge->aux);
691 edge->aux = NULL;
692 if (edge->inline_failed)
693 edge->inline_failed = failed_reason;
695 return;
698 for (; edge; edge = edge->next_caller)
699 if (edge->inline_failed)
700 update_edge_key (heap, edge);
703 /* Recompute heap nodes for each uninlined call.
704    This is used when we know that edge badnesses are only going to increase
705    (we introduced a new call site) and thus all we need is to insert newly
706    created edges into the heap.  */
708 static void
709 update_callee_keys (fibheap_t heap, struct cgraph_node *node,
710 bitmap updated_nodes)
712 struct cgraph_edge *e = node->callees;
713 node->global.estimated_growth = INT_MIN;
715 if (!e)
716 return;
717 while (true)
718 if (!e->inline_failed && e->callee->callees)
719 e = e->callee->callees;
720 else
722 if (e->inline_failed
723 && e->callee->local.inlinable
724 && cgraph_function_body_availability (e->callee) >= AVAIL_AVAILABLE
725 && !bitmap_bit_p (updated_nodes, e->callee->uid))
727 node->global.estimated_growth = INT_MIN;
728 /* If function becomes uninlinable, we need to remove it from the heap. */
729 if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
730 update_caller_keys (heap, e->callee, updated_nodes);
731 else
732 /* Otherwise update just edge E. */
733 update_edge_key (heap, e);
735 if (e->next_callee)
736 e = e->next_callee;
737 else
741 if (e->caller == node)
742 return;
743 e = e->caller->callers;
745 while (!e->next_callee);
746 e = e->next_callee;
751 /* Recompute heap nodes for the caller edges of each of the callees.
752    Walk recursively into all inline clones.  */
754 static void
755 update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
756 bitmap updated_nodes)
758 struct cgraph_edge *e = node->callees;
759 node->global.estimated_growth = INT_MIN;
761 if (!e)
762 return;
763 while (true)
764 if (!e->inline_failed && e->callee->callees)
765 e = e->callee->callees;
766 else
768 if (e->inline_failed)
769 update_caller_keys (heap, e->callee, updated_nodes);
770 if (e->next_callee)
771 e = e->next_callee;
772 else
776 if (e->caller == node)
777 return;
778 e = e->caller->callers;
780 while (!e->next_callee);
781 e = e->next_callee;
786 /* Enqueue all recursive calls from NODE into priority queue depending on
787 how likely we want to recursively inline the call. */
789 static void
790 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
791 fibheap_t heap)
793 static int priority;
794 struct cgraph_edge *e;
795 for (e = where->callees; e; e = e->next_callee)
796 if (e->callee == node)
798 /* When profile feedback is available, prioritize by expected number
799 of calls. Without profile feedback we maintain simple queue
800 to order candidates via recursive depths. */
801 fibheap_insert (heap,
802 !max_count ? priority++
803 : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
806 for (e = where->callees; e; e = e->next_callee)
807 if (!e->inline_failed)
808 lookup_recursive_calls (node, e->callee, heap);
811 /* Decide on recursive inlining: in case the function has recursive calls,
812    inline until the body size reaches the given limit.  If any new indirect
813    edges are discovered in the process, add them to *NEW_EDGES, unless
814    NEW_EDGES is NULL.  */
816 static bool
817 cgraph_decide_recursive_inlining (struct cgraph_node *node,
818 VEC (cgraph_edge_p, heap) **new_edges)
820 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
821 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
822 int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
823 fibheap_t heap;
824 struct cgraph_edge *e;
825 struct cgraph_node *master_clone, *next;
826 int depth = 0;
827 int n = 0;
829 /* It does not make sense to recursively inline always-inline functions
830 as we are going to sorry() on the remaining calls anyway. */
831 if (node->local.disregard_inline_limits
832 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
833 return false;
835 if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
836 || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
837 return false;
839 if (DECL_DECLARED_INLINE_P (node->decl))
841 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
842 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
845 /* Make sure that function is small enough to be considered for inlining. */
846 if (!max_depth
847 || cgraph_estimate_size_after_inlining (node, node) >= limit)
848 return false;
849 heap = fibheap_new ();
850 lookup_recursive_calls (node, node, heap);
851 if (fibheap_empty (heap))
853 fibheap_delete (heap);
854 return false;
857 if (dump_file)
858 fprintf (dump_file,
859 " Performing recursive inlining on %s\n",
860 cgraph_node_name (node));
862 /* We need original clone to copy around. */
863 master_clone = cgraph_clone_node (node, node->decl,
864 node->count, CGRAPH_FREQ_BASE, 1,
865 false, NULL);
866 master_clone->needed = true;
867 for (e = master_clone->callees; e; e = e->next_callee)
868 if (!e->inline_failed)
869 cgraph_clone_inlined_nodes (e, true, false);
871 /* Do the inlining and update list of recursive call during process. */
872 while (!fibheap_empty (heap)
873 && (cgraph_estimate_size_after_inlining (node, master_clone)
874 <= limit))
876 struct cgraph_edge *curr
877 = (struct cgraph_edge *) fibheap_extract_min (heap);
878 struct cgraph_node *cnode;
880 depth = 1;
881 for (cnode = curr->caller;
882 cnode->global.inlined_to; cnode = cnode->callers->caller)
883 	if (cnode->decl == node->decl)
884 depth++;
885 if (depth > max_depth)
887 if (dump_file)
888 fprintf (dump_file,
889 " maximal depth reached\n");
890 continue;
893 if (max_count)
895 if (!cgraph_maybe_hot_edge_p (curr))
897 if (dump_file)
898 fprintf (dump_file, " Not inlining cold call\n");
899 continue;
901 if (curr->count * 100 / node->count < probability)
903 if (dump_file)
904 fprintf (dump_file,
905 " Probability of edge is too small\n");
906 continue;
910 if (dump_file)
912 fprintf (dump_file,
913 " Inlining call of depth %i", depth);
914 if (node->count)
916 fprintf (dump_file, " called approx. %.2f times per call",
917 (double)curr->count / node->count);
919 fprintf (dump_file, "\n");
921 cgraph_redirect_edge_callee (curr, master_clone);
922 cgraph_mark_inline_edge (curr, false, new_edges);
923 lookup_recursive_calls (node, curr->callee, heap);
924 n++;
926 if (!fibheap_empty (heap) && dump_file)
927 fprintf (dump_file, " Recursive inlining growth limit met.\n");
929 fibheap_delete (heap);
930 if (dump_file)
931 fprintf (dump_file,
932 "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
933 master_clone->global.size, node->global.size,
934 master_clone->global.time, node->global.time);
936   /* Remove the master clone we used for inlining.  We rely on the fact that
937      clones inlined into the master clone get queued just before the master
938      clone, so we don't need recursion.  */
939 for (node = cgraph_nodes; node != master_clone;
940 node = next)
942 next = node->next;
943 if (node->global.inlined_to == master_clone)
944 cgraph_remove_node (node);
946 cgraph_remove_node (master_clone);
947 /* FIXME: Recursive inlining actually reduces number of calls of the
948 function. At this place we should probably walk the function and
949 inline clones and compensate the counts accordingly. This probably
950 doesn't matter much in practice. */
951 return n > 0;
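/* Editorial sketch: for a self-recursive function such as

     static int fact (int n) { return n <= 1 ? 1 : n * fact (n - 1); }

   the loop above redirects the recursive edge to the master clone and
   keeps inlining it into itself, effectively unrolling the recursion
   until the size limit or max_depth is reached.  */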
954 /* Set inline_failed for all callers of given function to REASON. */
956 static void
957 cgraph_set_inline_failed (struct cgraph_node *node,
958 cgraph_inline_failed_t reason)
960 struct cgraph_edge *e;
962 if (dump_file)
963 fprintf (dump_file, "Inlining failed: %s\n",
964 cgraph_inline_failed_string (reason));
965 for (e = node->callers; e; e = e->next_caller)
966 if (e->inline_failed)
967 e->inline_failed = reason;
970 /* Given whole compilation unit estimate of INSNS, compute how large we can
971 allow the unit to grow. */
972 static int
973 compute_max_insns (int insns)
975 int max_insns = insns;
976 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
977 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
979 return ((HOST_WIDEST_INT) max_insns
980 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
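/* Worked example (editorial): with insns = 10000 and
   --param inline-unit-growth set to 30, the unit may grow up to
   10000 * (100 + 30) / 100 = 13000 instructions.  */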
983 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
984 static void
985 add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
987 while (VEC_length (cgraph_edge_p, new_edges) > 0)
989 struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);
991 gcc_assert (!edge->aux);
992 if (edge->callee->local.inlinable
993 && edge->inline_failed
994 && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
995 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1000 /* We use a greedy algorithm for inlining of small functions:
1001    all inline candidates are put into a prioritized heap based on the estimated
1002    growth of the overall number of instructions, and the estimates are then
       updated after each inlining.
1004    INLINED and INLINED_CALLEES are just pointers to arrays large enough
1005    to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */
1007 static void
1008 cgraph_decide_inlining_of_small_functions (void)
1010 struct cgraph_node *node;
1011 struct cgraph_edge *edge;
1012 cgraph_inline_failed_t failed_reason;
1013 fibheap_t heap = fibheap_new ();
1014 bitmap updated_nodes = BITMAP_ALLOC (NULL);
1015 int min_size, max_size;
1016 VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
1018 if (flag_indirect_inlining)
1019 new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);
1021 if (dump_file)
1022 fprintf (dump_file, "\nDeciding on smaller functions:\n");
1024 /* Put all inline candidates into the heap. */
1026 for (node = cgraph_nodes; node; node = node->next)
1028 if (!node->local.inlinable || !node->callers)
1029 continue;
1030 if (dump_file)
1031 fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));
1033 node->global.estimated_growth = INT_MIN;
1034 if (!cgraph_default_inline_p (node, &failed_reason))
1036 cgraph_set_inline_failed (node, failed_reason);
1037 continue;
1040 for (edge = node->callers; edge; edge = edge->next_caller)
1041 if (edge->inline_failed)
1043 gcc_assert (!edge->aux);
1044 edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
1048 max_size = compute_max_insns (overall_size);
1049 min_size = overall_size;
1051 while (overall_size <= max_size
1052 && !fibheap_empty (heap))
1054 int old_size = overall_size;
1055 struct cgraph_node *where, *callee;
1056 int badness = fibheap_min_key (heap);
1057 int current_badness;
1058 int growth;
1059 cgraph_inline_failed_t not_good = CIF_OK;
1061 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1062 gcc_assert (edge->aux);
1063 edge->aux = NULL;
1064 if (!edge->inline_failed)
1065 continue;
1067       /* When updating the edge costs, we only decrease badness in the keys.
1068          When the badness increases, we keep the heap as it is and re-insert
1069          the key now.  */
1070 current_badness = cgraph_edge_badness (edge, false);
1071 gcc_assert (current_badness >= badness);
1072 if (current_badness != badness)
1074 edge->aux = fibheap_insert (heap, current_badness, edge);
1075 continue;
1078 callee = edge->callee;
1080 growth = (cgraph_estimate_size_after_inlining (edge->caller, edge->callee)
1081 - edge->caller->global.size);
1083 if (dump_file)
1085 fprintf (dump_file,
1086 "\nConsidering %s with %i size\n",
1087 cgraph_node_name (edge->callee),
1088 edge->callee->global.size);
1089 fprintf (dump_file,
1090 " to be inlined into %s in %s:%i\n"
1091 " Estimated growth after inlined into all callees is %+i insns.\n"
1092 " Estimated badness is %i, frequency %.2f.\n",
1093 cgraph_node_name (edge->caller),
1094 flag_wpa ? "unknown"
1095 : gimple_filename ((const_gimple) edge->call_stmt),
1096 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1097 cgraph_estimate_growth (edge->callee),
1098 badness,
1099 edge->frequency / (double)CGRAPH_FREQ_BASE);
1100 if (edge->count)
1101 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1102 if (dump_flags & TDF_DETAILS)
1103 cgraph_edge_badness (edge, true);
1106       /* When not having profile info ready we don't weight in any way the
1107          position of the call in the procedure itself.  This means that if a
1108          call of function A from function B seems profitable to inline, the
1109          recursive call of function A in the inline copy of A in B will look
1110          profitable too, and we end up inlining until reaching the maximal
1111          function growth.  This is not a good idea, so prohibit the recursive
            inlining.
1113          ??? When the frequencies are taken into account we might not need this
1114          restriction.
1116          We need to be careful here; in some testcases, e.g. directives.c in
1117          libcpp, we can estimate a self-recursive function to have negative
1118          growth for inlining completely.  */
1120 if (!edge->count)
1122 where = edge->caller;
1123 while (where->global.inlined_to)
1125 if (where->decl == edge->callee->decl)
1126 break;
1127 where = where->callers->caller;
1129 if (where->global.inlined_to)
1131 edge->inline_failed
1132 = (edge->callee->local.disregard_inline_limits
1133 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
1134 if (dump_file)
1135 fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
1136 continue;
1140 if (edge->callee->local.disregard_inline_limits)
1142 else if (!cgraph_maybe_hot_edge_p (edge))
1143 not_good = CIF_UNLIKELY_CALL;
1144 else if (!flag_inline_functions
1145 && !DECL_DECLARED_INLINE_P (edge->callee->decl))
1146 not_good = CIF_NOT_DECLARED_INLINED;
1147 else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION(edge->caller->decl)))
1148 not_good = CIF_OPTIMIZING_FOR_SIZE;
1149 if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
1151 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1152 &edge->inline_failed))
1154 edge->inline_failed = not_good;
1155 if (dump_file)
1156 fprintf (dump_file, " inline_failed:%s.\n",
1157 cgraph_inline_failed_string (edge->inline_failed));
1159 continue;
1161 if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
1163 if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
1164 &edge->inline_failed))
1166 if (dump_file)
1167 fprintf (dump_file, " inline_failed:%s.\n",
1168 cgraph_inline_failed_string (edge->inline_failed));
1170 continue;
1172 if (!tree_can_inline_p (edge)
1173 || edge->call_stmt_cannot_inline_p)
1175 if (dump_file)
1176 fprintf (dump_file, " inline_failed:%s.\n",
1177 cgraph_inline_failed_string (edge->inline_failed));
1178 continue;
1180 if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
1181 &edge->inline_failed))
1183 where = edge->caller;
1184 if (where->global.inlined_to)
1185 where = where->global.inlined_to;
1186 if (!cgraph_decide_recursive_inlining (where,
1187 flag_indirect_inlining
1188 ? &new_indirect_edges : NULL))
1189 continue;
1190 if (flag_indirect_inlining)
1191 add_new_edges_to_heap (heap, new_indirect_edges);
1192 update_all_callee_keys (heap, where, updated_nodes);
1194 else
1196 struct cgraph_node *callee;
1197 if (!cgraph_check_inline_limits (edge->caller, edge->callee,
1198 &edge->inline_failed))
1200 if (dump_file)
1201 fprintf (dump_file, " Not inlining into %s:%s.\n",
1202 cgraph_node_name (edge->caller),
1203 cgraph_inline_failed_string (edge->inline_failed));
1204 continue;
1206 callee = edge->callee;
1207 gcc_checking_assert (!callee->global.inlined_to);
1208 cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
1209 if (flag_indirect_inlining)
1210 add_new_edges_to_heap (heap, new_indirect_edges);
1212 	  /* We inlined the last offline copy into the body.  This might lead
1213 	     to callees of the function having fewer call sites and thus they
1214 	     may need updating.  */
1215 if (callee->global.inlined_to)
1216 update_all_callee_keys (heap, callee, updated_nodes);
1217 else
1218 update_callee_keys (heap, edge->callee, updated_nodes);
1220 where = edge->caller;
1221 if (where->global.inlined_to)
1222 where = where->global.inlined_to;
1224       /* Our profitability metric can depend on local properties
1225 	 such as the number of inlinable calls and the size of the function
1226 	 body.  After inlining these properties might change for the function
1227 	 we inlined into (since its body size changed) and for the functions
1228 	 called by the function we inlined (since the number of their inlinable
1229 	 callers might change).  */
1230 update_caller_keys (heap, where, updated_nodes);
1232 /* We removed one call of the function we just inlined. If offline
1233 copy is still needed, be sure to update the keys. */
1234 if (callee != where && !callee->global.inlined_to)
1235 update_caller_keys (heap, callee, updated_nodes);
1236 bitmap_clear (updated_nodes);
1238 if (dump_file)
1240 fprintf (dump_file,
1241 " Inlined into %s which now has size %i and self time %i,"
1242 "net change of %+i.\n",
1243 cgraph_node_name (edge->caller),
1244 edge->caller->global.time,
1245 edge->caller->global.size,
1246 overall_size - old_size);
1248 if (min_size > overall_size)
1250 min_size = overall_size;
1251 max_size = compute_max_insns (min_size);
1253 if (dump_file)
1254 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1257 while (!fibheap_empty (heap))
1259 int badness = fibheap_min_key (heap);
1261 edge = (struct cgraph_edge *) fibheap_extract_min (heap);
1262 gcc_assert (edge->aux);
1263 edge->aux = NULL;
1264 if (!edge->inline_failed)
1265 continue;
1266 #ifdef ENABLE_CHECKING
1267 gcc_assert (cgraph_edge_badness (edge, false) >= badness);
1268 #endif
1269 if (dump_file)
1271 fprintf (dump_file,
1272 "\nSkipping %s with %i size\n",
1273 cgraph_node_name (edge->callee),
1274 edge->callee->global.size);
1275 fprintf (dump_file,
1276 " called by %s in %s:%i\n"
1277 " Estimated growth after inlined into all callees is %+i insns.\n"
1278 " Estimated badness is %i, frequency %.2f.\n",
1279 cgraph_node_name (edge->caller),
1280 flag_wpa ? "unknown"
1281 : gimple_filename ((const_gimple) edge->call_stmt),
1282 flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
1283 cgraph_estimate_growth (edge->callee),
1284 badness,
1285 edge->frequency / (double)CGRAPH_FREQ_BASE);
1286 if (edge->count)
1287 fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
1288 if (dump_flags & TDF_DETAILS)
1289 cgraph_edge_badness (edge, true);
1291 if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
1292 && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
1293 &edge->inline_failed))
1294 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1297 if (new_indirect_edges)
1298 VEC_free (cgraph_edge_p, heap, new_indirect_edges);
1299 fibheap_delete (heap);
1300 BITMAP_FREE (updated_nodes);
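/* Editorial outline of the function above (pseudocode, not verbatim):

     put every candidate edge into the heap keyed by badness;
     while (overall_size <= max_size && !fibheap_empty (heap))
       {
         edge = extract-min;
         if the key is stale, re-insert with the current badness;
         if the limit, recursion or inlinability checks fail, record the
           reason and continue;
         cgraph_mark_inline_edge (edge, ...);
         update the keys of the affected callers and callees;
       }  */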
1303 /* Flatten NODE from the IPA inliner. */
1305 static void
1306 cgraph_flatten (struct cgraph_node *node)
1308 struct cgraph_edge *e;
1310 /* We shouldn't be called recursively when we are being processed. */
1311 gcc_assert (node->aux == NULL);
1313 node->aux = (void *)(size_t) INLINE_ALL;
1315 for (e = node->callees; e; e = e->next_callee)
1317 struct cgraph_node *orig_callee;
1319 if (e->call_stmt_cannot_inline_p)
1321 if (dump_file)
1322 fprintf (dump_file, "Not inlining: %s",
1323 cgraph_inline_failed_string (e->inline_failed));
1324 continue;
1327 if (!e->callee->analyzed)
1329 if (dump_file)
1330 fprintf (dump_file,
1331 "Not inlining: Function body not available.\n");
1332 continue;
1335       /* We've hit a cycle?  It is time to give up.  */
1336 if (e->callee->aux)
1338 if (dump_file)
1339 fprintf (dump_file,
1340 "Not inlining %s into %s to avoid cycle.\n",
1341 cgraph_node_name (e->callee),
1342 cgraph_node_name (e->caller));
1343 e->inline_failed = CIF_RECURSIVE_INLINING;
1344 continue;
1347 /* When the edge is already inlined, we just need to recurse into
1348 it in order to fully flatten the leaves. */
1349 if (!e->inline_failed)
1351 cgraph_flatten (e->callee);
1352 continue;
1355 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1357 if (dump_file)
1358 fprintf (dump_file, "Not inlining: recursive call.\n");
1359 continue;
1362 if (!tree_can_inline_p (e))
1364 if (dump_file)
1365 fprintf (dump_file, "Not inlining: %s",
1366 cgraph_inline_failed_string (e->inline_failed));
1367 continue;
1370 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1371 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1373 if (dump_file)
1374 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1375 continue;
1378 /* Inline the edge and flatten the inline clone. Avoid
1379 recursing through the original node if the node was cloned. */
1380 if (dump_file)
1381 fprintf (dump_file, " Inlining %s into %s.\n",
1382 cgraph_node_name (e->callee),
1383 cgraph_node_name (e->caller));
1384 orig_callee = e->callee;
1385 cgraph_mark_inline_edge (e, true, NULL);
1386 if (e->callee != orig_callee)
1387 orig_callee->aux = (void *)(size_t) INLINE_ALL;
1388 cgraph_flatten (e->callee);
1389 if (e->callee != orig_callee)
1390 orig_callee->aux = NULL;
1393 node->aux = NULL;
1396 /* Decide on the inlining.  We do so in the topological order to avoid
1397    the expense of updating data structures.  */
1399 static unsigned int
1400 cgraph_decide_inlining (void)
1402 struct cgraph_node *node;
1403 int nnodes;
1404 struct cgraph_node **order =
1405 XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
1406 int old_size = 0;
1407 int i;
1408 int initial_size = 0;
1410 cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
1411 if (in_lto_p && flag_indirect_inlining)
1412 ipa_update_after_lto_read ();
1413 if (flag_indirect_inlining)
1414 ipa_create_all_structures_for_iinln ();
1416 max_count = 0;
1417 max_benefit = 0;
1418 for (node = cgraph_nodes; node; node = node->next)
1419 if (node->analyzed)
1421 struct cgraph_edge *e;
1423 gcc_assert (inline_summary (node)->self_size == node->global.size);
1424 if (!DECL_EXTERNAL (node->decl))
1425 initial_size += node->global.size;
1426 for (e = node->callees; e; e = e->next_callee)
1427 if (max_count < e->count)
1428 max_count = e->count;
1429 if (max_benefit < inline_summary (node)->time_inlining_benefit)
1430 max_benefit = inline_summary (node)->time_inlining_benefit;
1432 gcc_assert (in_lto_p
1433 || !max_count
1434 || (profile_info && flag_branch_probabilities));
1435 overall_size = initial_size;
1437 nnodes = cgraph_postorder (order);
1439 if (dump_file)
1440 fprintf (dump_file,
1441 "\nDeciding on inlining. Starting with size %i.\n",
1442 initial_size);
1444 for (node = cgraph_nodes; node; node = node->next)
1445 node->aux = 0;
1447 if (dump_file)
1448 fprintf (dump_file, "\nFlattening functions:\n");
1450 /* In the first pass handle functions to be flattened. Do this with
1451 a priority so none of our later choices will make this impossible. */
1452 for (i = nnodes - 1; i >= 0; i--)
1454 node = order[i];
1456       /* Handle nodes to be flattened, but don't update overall unit
1457 	 size.  Calling the incremental inliner here is lame,
1458 	 a simple worklist should be enough.  What should be left
1459 	 here from the early inliner (if it runs) is cyclic cases.
1460 	 Ideally when processing callees we stop inlining at the
1461 	 entry of cycles, possibly cloning that entry point and
1462 	 trying to flatten it, turning it into a self-recursive
1463 	 function.  */
1464 if (lookup_attribute ("flatten",
1465 DECL_ATTRIBUTES (node->decl)) != NULL)
1467 if (dump_file)
1468 fprintf (dump_file,
1469 "Flattening %s\n", cgraph_node_name (node));
1470 cgraph_flatten (node);
1474 cgraph_decide_inlining_of_small_functions ();
1476 if (flag_inline_functions_called_once)
1478 if (dump_file)
1479 fprintf (dump_file, "\nDeciding on functions called once:\n");
1481 /* And finally decide what functions are called once. */
1482 for (i = nnodes - 1; i >= 0; i--)
1484 node = order[i];
1486 if (node->callers
1487 && !node->callers->next_caller
1488 && !node->global.inlined_to
1489 && cgraph_will_be_removed_from_program_if_no_direct_calls (node)
1490 && node->local.inlinable
1491 && cgraph_function_body_availability (node) >= AVAIL_AVAILABLE
1492 && node->callers->inline_failed
1493 && node->callers->caller != node
1494 && node->callers->caller->global.inlined_to != node
1495 && !node->callers->call_stmt_cannot_inline_p
1496 && !DECL_EXTERNAL (node->decl))
1498 cgraph_inline_failed_t reason;
1499 old_size = overall_size;
1500 if (dump_file)
1502 fprintf (dump_file,
1503 "\nConsidering %s size %i.\n",
1504 cgraph_node_name (node), node->global.size);
1505 fprintf (dump_file,
1506 " Called once from %s %i insns.\n",
1507 cgraph_node_name (node->callers->caller),
1508 node->callers->caller->global.size);
1511 if (cgraph_check_inline_limits (node->callers->caller, node,
1512 &reason))
1514 struct cgraph_node *caller = node->callers->caller;
1515 cgraph_mark_inline_edge (node->callers, true, NULL);
1516 if (dump_file)
1517 fprintf (dump_file,
1518 " Inlined into %s which now has %i size"
1519 " for a net change of %+i size.\n",
1520 cgraph_node_name (caller),
1521 caller->global.size,
1522 overall_size - old_size);
1524 else
1526 if (dump_file)
1527 fprintf (dump_file,
1528 " Not inlining: %s.\n",
1529 cgraph_inline_failed_string (reason));
1535 /* Free ipa-prop structures if they are no longer needed. */
1536 if (flag_indirect_inlining)
1537 ipa_free_all_structures_after_iinln ();
1539 if (dump_file)
1540 fprintf (dump_file,
1541 "\nInlined %i calls, eliminated %i functions, "
1542 "size %i turned to %i size.\n\n",
1543 ncalls_inlined, nfunctions_inlined, initial_size,
1544 overall_size);
1545 free (order);
1546 return 0;
1549 /* Return true when N is a leaf function.  Accept cheap builtins
1550    in leaf functions.  */
1552 static bool
1553 leaf_node_p (struct cgraph_node *n)
1555 struct cgraph_edge *e;
1556 for (e = n->callees; e; e = e->next_callee)
1557 if (!is_inexpensive_builtin (e->callee->decl))
1558 return false;
1559 return true;
1562 /* Decide on the inlining.  We do so in the topological order to avoid
1563    the expense of updating data structures.  */
1565 static bool
1566 cgraph_decide_inlining_incrementally (struct cgraph_node *node,
1567 enum inlining_mode mode)
1569 struct cgraph_edge *e;
1570 bool inlined = false;
1571 cgraph_inline_failed_t failed_reason;
1573 #ifdef ENABLE_CHECKING
1574 verify_cgraph_node (node);
1575 #endif
1577 if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
1578 && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
1580 if (dump_file)
1581 fprintf (dump_file, "Incrementally flattening %s\n",
1582 cgraph_node_name (node));
1583 mode = INLINE_ALL;
1586 /* First of all look for always inline functions. */
1587 if (mode != INLINE_SIZE_NORECURSIVE)
1588 for (e = node->callees; e; e = e->next_callee)
1590 if (!e->callee->local.disregard_inline_limits
1591 && (mode != INLINE_ALL || !e->callee->local.inlinable))
1592 continue;
1593 if (dump_file)
1594 fprintf (dump_file,
1595 "Considering to always inline inline candidate %s.\n",
1596 cgraph_node_name (e->callee));
1597 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1599 if (dump_file)
1600 fprintf (dump_file, "Not inlining: recursive call.\n");
1601 continue;
1603 if (!tree_can_inline_p (e)
1604 || e->call_stmt_cannot_inline_p)
1606 if (dump_file)
1607 fprintf (dump_file,
1608 "Not inlining: %s",
1609 cgraph_inline_failed_string (e->inline_failed));
1610 continue;
1612 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1613 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1615 if (dump_file)
1616 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
1617 continue;
1619 if (!e->callee->analyzed)
1621 if (dump_file)
1622 fprintf (dump_file,
1623 "Not inlining: Function body no longer available.\n");
1624 continue;
1627 if (dump_file)
1628 fprintf (dump_file, " Inlining %s into %s.\n",
1629 cgraph_node_name (e->callee),
1630 cgraph_node_name (e->caller));
1631 cgraph_mark_inline_edge (e, true, NULL);
1632 inlined = true;
1635 /* Now do the automatic inlining. */
1636 if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
1637 /* Never inline regular functions into always-inline functions
1638 during incremental inlining. */
1639 && !node->local.disregard_inline_limits)
1641 bitmap visited = BITMAP_ALLOC (NULL);
1642 for (e = node->callees; e; e = e->next_callee)
1644 int allowed_growth = 0;
1645 if (!e->callee->local.inlinable
1646 || !e->inline_failed
1647 || e->callee->local.disregard_inline_limits)
1648 continue;
1649 /* We are inlining a function to all call-sites in node
1650 or to none. So visit each candidate only once. */
1651 if (!bitmap_set_bit (visited, e->callee->uid))
1652 continue;
1653 if (dump_file)
1654 fprintf (dump_file, "Considering inline candidate %s.\n",
1655 cgraph_node_name (e->callee));
1656 if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
1658 if (dump_file)
1659 fprintf (dump_file, "Not inlining: recursive call.\n");
1660 continue;
1662 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
1663 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
1665 if (dump_file)
1666 fprintf (dump_file,
1667 "Not inlining: SSA form does not match.\n");
1668 continue;
1671 if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
1672 && optimize_function_for_speed_p (cfun))
1673 allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
1675 /* When the function body would grow and inlining the function
1676 won't eliminate the need for offline copy of the function,
1677 don't inline. */
1678 if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
1679 || (!flag_inline_functions
1680 && !DECL_DECLARED_INLINE_P (e->callee->decl)))
1681 && (cgraph_estimate_size_after_inlining (e->caller, e->callee)
1682 > e->caller->global.size + allowed_growth)
1683 && cgraph_estimate_growth (e->callee) > allowed_growth)
1685 if (dump_file)
1686 fprintf (dump_file,
1687 "Not inlining: code size would grow by %i.\n",
1688 cgraph_estimate_size_after_inlining (e->caller,
1689 e->callee)
1690 - e->caller->global.size);
1691 continue;
1693 if (e->call_stmt_cannot_inline_p
1694 || !tree_can_inline_p (e))
1696 if (dump_file)
1697 fprintf (dump_file,
1698 "Not inlining: call site not inlinable.\n");
1699 continue;
1701 if (!e->callee->analyzed)
1703 if (dump_file)
1704 fprintf (dump_file,
1705 "Not inlining: Function body no longer available.\n");
1706 continue;
1708 if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed))
1710 if (dump_file)
1711 fprintf (dump_file, "Not inlining: %s.\n",
1712 cgraph_inline_failed_string (e->inline_failed));
1713 continue;
1715 if (cgraph_default_inline_p (e->callee, &failed_reason))
1717 if (dump_file)
1718 fprintf (dump_file, " Inlining %s into %s.\n",
1719 cgraph_node_name (e->callee),
1720 cgraph_node_name (e->caller));
1721 cgraph_mark_inline_edge (e, true, NULL);
1722 inlined = true;
1725 BITMAP_FREE (visited);
1727 return inlined;
1730 /* Because inlining might remove no-longer-reachable nodes, we need to
1731    keep the array visible to the garbage collector to avoid reading
1732    collected-out nodes.  */
1733 static int nnodes;
1734 static GTY ((length ("nnodes"))) struct cgraph_node **order;
1736 /* Do inlining of small functions. Doing so early helps profiling and other
1737 passes to be somewhat more effective and avoids some code duplication in
1738 later real inlining pass for testcases with very many function calls. */
1739 static unsigned int
1740 cgraph_early_inlining (void)
1742 struct cgraph_node *node = cgraph_node (current_function_decl);
1743 unsigned int todo = 0;
1744 int iterations = 0;
1746 if (seen_error ())
1747 return 0;
1749 if (!optimize
1750 || flag_no_inline
1751 || !flag_early_inlining)
1753       /* When not optimizing or not inlining, inline only always-inline
1754 	 functions.  */
1755 cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
1756 timevar_push (TV_INTEGRATION);
1757 todo |= optimize_inline_calls (current_function_decl);
1758 timevar_pop (TV_INTEGRATION);
1760 else
1762 if (lookup_attribute ("flatten",
1763 DECL_ATTRIBUTES (node->decl)) != NULL)
1765 if (dump_file)
1766 fprintf (dump_file,
1767 "Flattening %s\n", cgraph_node_name (node));
1768 cgraph_flatten (node);
1769 timevar_push (TV_INTEGRATION);
1770 todo |= optimize_inline_calls (current_function_decl);
1771 timevar_pop (TV_INTEGRATION);
1773 /* We iterate incremental inlining to get trivial cases of indirect
1774 inlining. */
1775 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
1776 && cgraph_decide_inlining_incrementally (node,
1777 iterations
1778 ? INLINE_SIZE_NORECURSIVE
1779 : INLINE_SIZE))
1781 timevar_push (TV_INTEGRATION);
1782 todo |= optimize_inline_calls (current_function_decl);
1783 iterations++;
1784 timevar_pop (TV_INTEGRATION);
1786 if (dump_file)
1787 fprintf (dump_file, "Iterations: %i\n", iterations);
1790 cfun->always_inline_functions_inlined = true;
1792 return todo;
1795 struct gimple_opt_pass pass_early_inline =
1798 GIMPLE_PASS,
1799 "einline", /* name */
1800 NULL, /* gate */
1801 cgraph_early_inlining, /* execute */
1802 NULL, /* sub */
1803 NULL, /* next */
1804 0, /* static_pass_number */
1805 TV_INLINE_HEURISTICS, /* tv_id */
1806 0, /* properties_required */
1807 0, /* properties_provided */
1808 0, /* properties_destroyed */
1809 0, /* todo_flags_start */
1810 TODO_dump_func /* todo_flags_finish */
1815 /* See if a statement might disappear after inlining.
1816    0 - means not eliminated
1817    1 - half of the statements go away
1818    2 - for sure it is eliminated.
1819    We are not terribly sophisticated, basically looking for simple abstraction
1820    penalty wrappers.  */
1822 static int
1823 eliminated_by_inlining_prob (gimple stmt)
1825 enum gimple_code code = gimple_code (stmt);
1826 switch (code)
1828 case GIMPLE_RETURN:
1829 return 2;
1830 case GIMPLE_ASSIGN:
1831 if (gimple_num_ops (stmt) != 2)
1832 return 0;
1834       /* Casts of parameters, loads from parameters passed by reference
1835          and stores to the return value or parameters are often free after
1836          inlining due to SRA and further combining.
1837          Assume that half of the statements go away.  */
1838 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1839 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1840 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1841 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1843 tree rhs = gimple_assign_rhs1 (stmt);
1844 tree lhs = gimple_assign_lhs (stmt);
1845 tree inner_rhs = rhs;
1846 tree inner_lhs = lhs;
1847 bool rhs_free = false;
1848 bool lhs_free = false;
1850 while (handled_component_p (inner_lhs)
1851 || TREE_CODE (inner_lhs) == MEM_REF)
1852 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1853 while (handled_component_p (inner_rhs)
1854 || TREE_CODE (inner_rhs) == ADDR_EXPR
1855 || TREE_CODE (inner_rhs) == MEM_REF)
1856 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1859 if (TREE_CODE (inner_rhs) == PARM_DECL
1860 || (TREE_CODE (inner_rhs) == SSA_NAME
1861 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1862 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1863 rhs_free = true;
1864 if (rhs_free && is_gimple_reg (lhs))
1865 lhs_free = true;
1866 if (((TREE_CODE (inner_lhs) == PARM_DECL
1867 || (TREE_CODE (inner_lhs) == SSA_NAME
1868 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1869 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1870 && inner_lhs != lhs)
1871 || TREE_CODE (inner_lhs) == RESULT_DECL
1872 || (TREE_CODE (inner_lhs) == SSA_NAME
1873 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1874 lhs_free = true;
1875 if (lhs_free
1876 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1877 rhs_free = true;
1878 if (lhs_free && rhs_free)
1879 return 1;
1881 return 0;
1882 default:
1883 return 0;
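/* Illustrative example (the wrapper is hypothetical): for

     int getter (struct s *p) { return p->field; }

   the load from *P scores 1 above, since parameter dereferences usually
   fold away after SRA once the wrapper is inlined, and the GIMPLE_RETURN
   scores 2, so nearly the whole body is expected to vanish.  This is the
   abstraction-penalty pattern the heuristic is tuned for.  */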
1887 /* Compute function body size parameters for NODE. */
1889 static void
1890 estimate_function_body_sizes (struct cgraph_node *node)
1892 gcov_type time = 0;
1893 gcov_type time_inlining_benefit = 0;
1894 /* Estimate static overhead for function prologue/epilogue and alignment. */
1895 int size = 2;
1896 /* Benefits are scaled by the probability of elimination, which is in the
1897 range <0,2>; a worked example of the scaling follows this function. */
1898 int size_inlining_benefit = 2 * 2;
1899 basic_block bb;
1900 gimple_stmt_iterator bsi;
1901 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1902 tree arg;
1903 int freq;
1904 tree funtype = TREE_TYPE (node->decl);
1906 if (dump_file)
1907 fprintf (dump_file, "Analyzing function body size: %s\n",
1908 cgraph_node_name (node));
1910 gcc_assert (my_function && my_function->cfg);
1911 FOR_EACH_BB_FN (bb, my_function)
1913 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1914 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1916 gimple stmt = gsi_stmt (bsi);
1917 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1918 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1919 int prob;
1921 if (dump_file && (dump_flags & TDF_DETAILS))
1923 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1924 freq, this_size, this_time);
1925 print_gimple_stmt (dump_file, stmt, 0, 0);
1927 this_time *= freq;
1928 time += this_time;
1929 size += this_size;
1930 prob = eliminated_by_inlining_prob (stmt);
1931 if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS))
1932 fprintf (dump_file, " 50%% will be eliminated by inlining\n");
1933 if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
1934 fprintf (dump_file, " will be eliminated by inlining\n");
1935 size_inlining_benefit += this_size * prob;
1936 time_inlining_benefit += this_time * prob;
1937 gcc_assert (time >= 0);
1938 gcc_assert (size >= 0);
1941 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1942 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE)
1943 / (CGRAPH_FREQ_BASE * 2));
1944 size_inlining_benefit = (size_inlining_benefit + 1) / 2;
1945 if (dump_file)
1946 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1947 (int)time, (int)time_inlining_benefit,
1948 size, size_inlining_benefit);
1949 time_inlining_benefit += eni_time_weights.call_cost;
1950 size_inlining_benefit += eni_size_weights.call_cost;
1951 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1953 int cost = estimate_move_cost (TREE_TYPE (funtype));
1954 time_inlining_benefit += cost;
1955 size_inlining_benefit += cost;
1957 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = DECL_CHAIN (arg))
1958 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1960 int cost = estimate_move_cost (TREE_TYPE (arg));
1961 time_inlining_benefit += cost;
1962 size_inlining_benefit += cost;
1964 if (time_inlining_benefit > MAX_TIME)
1965 time_inlining_benefit = MAX_TIME;
1966 if (time > MAX_TIME)
1967 time = MAX_TIME;
1968 inline_summary (node)->self_time = time;
1969 inline_summary (node)->self_size = size;
1970 if (dump_file)
1971 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
1972 (int)time, (int)time_inlining_benefit,
1973 size, size_inlining_benefit);
1974 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
1975 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
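/* Worked example of the scaling above, assuming CGRAPH_FREQ_BASE == 1000
   (its value in cgraph.h): a statement with estimated time 3 in a block
   of frequency 1500 adds 4500 to TIME, and the final
   time = (4500 + 500) / 1000 = 5 rounds back to the per-invocation scale.
   If the statement also has elimination probability 2, it adds
   2 * 4500 = 9000 to TIME_INLINING_BENEFIT, where the divisor
   CGRAPH_FREQ_BASE * 2 undoes both the frequency scaling and the <0,2>
   probability scaling.  */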
1978 /* Compute parameters of functions used by inliner. */
1979 unsigned int
1980 compute_inline_parameters (struct cgraph_node *node)
1982 HOST_WIDE_INT self_stack_size;
1984 gcc_assert (!node->global.inlined_to);
1986 /* Estimate the stack size for the function. But not at -O0
1987 because estimated_stack_frame_size is a quadratic problem. */
1988 self_stack_size = optimize ? estimated_stack_frame_size (node->decl) : 0;
1989 inline_summary (node)->estimated_self_stack_size = self_stack_size;
1990 node->global.estimated_stack_size = self_stack_size;
1991 node->global.stack_frame_offset = 0;
1993 /* Can this function be inlined at all? */
1994 node->local.inlinable = tree_inlinable_function_p (node->decl);
1995 if (node->local.inlinable && !node->local.disregard_inline_limits)
1996 node->local.disregard_inline_limits
1997 = DECL_DISREGARD_INLINE_LIMITS (node->decl);
1998 estimate_function_body_sizes (node);
1999 /* Inlining characteristics are maintained by cgraph_mark_inline. */
2000 node->global.time = inline_summary (node)->self_time;
2001 node->global.size = inline_summary (node)->self_size;
2002 return 0;
2006 /* Compute the parameters used by the inliner for
2007 current_function_decl. */
2008 static unsigned int
2009 compute_inline_parameters_for_current (void)
2011 compute_inline_parameters (cgraph_node (current_function_decl));
2012 return 0;
2015 struct gimple_opt_pass pass_inline_parameters =
2018 GIMPLE_PASS,
2019 "inline_param", /* name */
2020 NULL, /* gate */
2021 compute_inline_parameters_for_current,/* execute */
2022 NULL, /* sub */
2023 NULL, /* next */
2024 0, /* static_pass_number */
2025 TV_INLINE_HEURISTICS, /* tv_id */
2026 0, /* properties_required */
2027 0, /* properties_provided */
2028 0, /* properties_destroyed */
2029 0, /* todo_flags_start */
2030 0 /* todo_flags_finish */
2034 /* This function performs the intraprocedural analysis of NODE that is
2035 required to inline indirect calls. */
2036 static void
2037 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
2039 ipa_analyze_node (node);
2040 if (dump_file && (dump_flags & TDF_DETAILS))
2042 ipa_print_node_params (dump_file, node);
2043 ipa_print_node_jump_functions (dump_file, node);
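/* For illustration (CALLER and CALLEE are hypothetical): given

     static int callee (int i);
     void caller (int x) { callee (x); callee (7); }

   ipa_analyze_node records a pass-through jump function for the first
   call (argument 0 is CALLER's parameter X, unmodified) and a constant
   jump function for the second.  Indirect inlining relies on the same
   machinery to track function pointers passed as arguments.  */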
2047 /* Note function body size. */
2048 static void
2049 analyze_function (struct cgraph_node *node)
2051 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2052 current_function_decl = node->decl;
2054 compute_inline_parameters (node);
2055 /* FIXME: We should remove the optimize check after we ensure we never run
2056 IPA passes when not optimizing. */
2057 if (flag_indirect_inlining && optimize)
2058 inline_indirect_intraprocedural_analysis (node);
2060 current_function_decl = NULL;
2061 pop_cfun ();
2064 /* Called when new function is inserted to callgraph late. */
2065 static void
2066 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
2068 analyze_function (node);
2071 /* Generate the inline summary: note function body sizes for all analyzed functions. */
2072 static void
2073 inline_generate_summary (void)
2075 struct cgraph_node *node;
2077 function_insertion_hook_holder =
2078 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2080 if (flag_indirect_inlining)
2082 ipa_register_cgraph_hooks ();
2083 ipa_check_create_node_params ();
2084 ipa_check_create_edge_args ();
2087 for (node = cgraph_nodes; node; node = node->next)
2088 if (node->analyzed)
2089 analyze_function (node);
2091 return;
2094 /* Apply inline plan to function. */
2095 static unsigned int
2096 inline_transform (struct cgraph_node *node)
2098 unsigned int todo = 0;
2099 struct cgraph_edge *e;
2100 bool inline_p = false;
2102 /* FIXME: Currently the pass manager adds the inline transform more than once
2103 to some clones. This needs revisiting after the WPA cleanups. */
2104 if (cfun->after_inlining)
2105 return 0;
2107 /* We might need the body of this function so that we can expand
2108 it inline somewhere else. */
2109 if (cgraph_preserve_function_body_p (node->decl))
2110 save_inline_function_body (node);
2112 for (e = node->callees; e; e = e->next_callee)
2114 cgraph_redirect_edge_call_stmt_to_callee (e);
2115 if (!e->inline_failed || warn_inline)
2116 inline_p = true;
2119 if (inline_p)
2121 timevar_push (TV_INTEGRATION);
2122 todo = optimize_inline_calls (current_function_decl);
2123 timevar_pop (TV_INTEGRATION);
2125 cfun->always_inline_functions_inlined = true;
2126 cfun->after_inlining = true;
2127 return todo | execute_fixup_cfg ();
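/* Illustrative only (HELPER is hypothetical): if the inline plan marked
   the edge for

     int wrapper (void) { return helper (); }

   as inlined (!e->inline_failed), cgraph_redirect_edge_call_stmt_to_callee
   first makes sure the call statement really targets the edge's callee
   (the two can diverge after clone materialization), and
   optimize_inline_calls then splices the body of HELPER into WRAPPER.  */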
2130 /* Read the inline summary. Jump functions are shared between ipa-cp
2131 and the inliner, so when ipa-cp is active it streams them and we do
2132 not need to read them here a second time. */
2134 static void
2135 inline_read_summary (void)
2137 if (flag_indirect_inlining)
2139 ipa_register_cgraph_hooks ();
2140 if (!flag_ipa_cp)
2141 ipa_prop_read_jump_functions ();
2143 function_insertion_hook_holder =
2144 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2147 /* Write the inline summary for the nodes in SET.
2148 Jump functions are shared between ipa-cp and the inliner, so when ipa-cp
2149 is active we don't need to write them twice. */
2151 static void
2152 inline_write_summary (cgraph_node_set set,
2153 varpool_node_set vset ATTRIBUTE_UNUSED)
2155 if (flag_indirect_inlining && !flag_ipa_cp)
2156 ipa_prop_write_jump_functions (set);
2159 /* When to run IPA inlining. Inlining of always-inline functions
2160 happens during early inlining. */
2162 static bool
2163 gate_cgraph_decide_inlining (void)
2165 /* ??? We'd like to skip this if not optimizing or not inlining, as
2166 all always-inline functions have already been processed by early
2167 inlining. But skipping it breaks EH with C++, because we need to
2168 run fixup_cfg unconditionally, even at -O0.
2169 So leave the pass enabled unconditionally for now. */
2170 return 1;
2173 struct ipa_opt_pass_d pass_ipa_inline =
2176 IPA_PASS,
2177 "inline", /* name */
2178 gate_cgraph_decide_inlining, /* gate */
2179 cgraph_decide_inlining, /* execute */
2180 NULL, /* sub */
2181 NULL, /* next */
2182 0, /* static_pass_number */
2183 TV_INLINE_HEURISTICS, /* tv_id */
2184 0, /* properties_required */
2185 0, /* properties_provided */
2186 0, /* properties_destroyed */
2187 TODO_remove_functions, /* todo_flags_start */
2188 TODO_dump_cgraph | TODO_dump_func
2189 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2191 inline_generate_summary, /* generate_summary */
2192 inline_write_summary, /* write_summary */
2193 inline_read_summary, /* read_summary */
2194 NULL, /* write_optimization_summary */
2195 NULL, /* read_optimization_summary */
2196 NULL, /* stmt_fixup */
2197 0, /* TODOs */
2198 inline_transform, /* function_transform */
2199 NULL, /* variable_transform */
2203 #include "gt-ipa-inline.h"