/* Inlining decision heuristics.
   Copyright (C) 2003, 2004, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   We separate inlining decisions from the inliner itself and store them
   inside the callgraph as a so-called inline plan.  Refer to the cgraph.c
   documentation about the particular representation of inline plans in
   the callgraph.

   There are three major parts of this file:

   cgraph_mark_inline implementation

     This function allows marking a given call inline and performs the
     necessary modifications of the cgraph (production of the clones and
     updating of overall statistics).

   inlining heuristics limits

     These functions allow checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, overall
     unit growth and so on).

   inlining heuristics

     This is the implementation of the IPA pass aiming to get as much
     benefit from inlining as possible while obeying the limits checked
     above.

     The implementation of the particular heuristics is separated from
     the rest of the code to make it easier to replace it with a more
     complicated implementation in the future.  The rest of the inlining
     code acts as a library aimed to modify the callgraph and verify that
     the parameters on code size growth fit.

     To mark a given call inline, use the cgraph_mark_inline function; the
     verification is performed by cgraph_default_inline_p and
     cgraph_check_inline_limits.

     The heuristics implement a simple knapsack style algorithm ordering
     all functions by their "profitability" (estimated by code size growth)
     and inlining them in priority order.
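
     For illustration (numbers invented for this comment, not measured):
     if inlining f into its three call sites would change the callers'
     sizes by +12, +4 and -2 units respectively, the -2 edge has the
     smallest badness and is processed first, while the +12 edge comes
     last and may be rejected once the unit growth limit is hit.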
     cgraph_decide_inlining implements heuristics taking the whole
     callgraph into account, while cgraph_decide_inlining_incrementally
     considers only one function at a time and is used by the early
     inliner.

   The inliner itself is split into several passes:

   pass_inline_parameters

     This pass computes local properties of functions that are used by the
     inliner: estimated function body size, whether the function is
     inlinable at all, and stack frame consumption.

     Before executing any of the inliner passes, this local pass has to be
     applied to each function in the callgraph (ie run as a subpass of
     some earlier IPA pass).  The results are made out of date by any
     optimization applied on the function body.

   pass_early_inlining

     Simple local inlining pass inlining callees into the current
     function.  This pass makes no global whole compilation unit analysis,
     and thus when allowed to do inlining expanding code size it might
     result in unbounded growth of the whole unit.

     The pass is run during conversion into SSA form.  Only functions
     already converted into SSA form are inlined, so the conversion must
     happen in topological order on the callgraph (that is maintained by
     the pass manager).  The functions after inlining are early optimized,
     so the early inliner sees the unoptimized function itself, but all
     considered callees are already optimized, allowing it to unfold
     abstraction penalty on C++ effectively and cheaply.

   pass_ipa_early_inlining

     With profiling, the early inlining is also necessary to reduce
     instrumentation costs on programs with high abstraction penalty
     (doing many redundant calls).  This can't happen in parallel with
     early optimization and profile instrumentation, because we would end
     up re-instrumenting already instrumented function bodies we brought
     in via inlining.

     To avoid this, this pass is executed as an IPA pass before profiling.
     It is a simple wrapper to pass_early_inlining and ensures first
     inlining.

   pass_ipa_inline

     This is the main pass implementing a simple greedy algorithm to do
     inlining of small functions that results in overall growth of the
     compilation unit and inlining of functions called once.  The pass
     computes just the so called inline plan (the representation of the
     inlining to be done in the callgraph) and, unlike early inlining, it
     does not perform the inlining itself.

   pass_apply_inline

     This pass performs the actual inlining according to pass_ipa_inline
     on a given function.  Possibly the function body before inlining is
     saved when it is needed for further inlining later.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "timevar.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "hashtab.h"
#include "coverage.h"
#include "ggc.h"
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"
#include "except.h"

#define MAX_TIME 1000000000
/* Modes the incremental inliner operates in:

   In ALWAYS_INLINE mode, only functions marked always_inline are inlined.
   This mode is used after detecting a cycle during flattening.

   In SIZE mode, only functions that reduce function body size after
   inlining are inlined; this is used during early inlining.

   In ALL mode, everything is inlined.  This is used during flattening.  */
enum inlining_mode {
  INLINE_NONE = 0,
  INLINE_ALWAYS_INLINE,
  INLINE_SIZE_NORECURSIVE,
  INLINE_SIZE,
  INLINE_ALL
};

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode);
static void cgraph_flatten (struct cgraph_node *node);
/* Statistics we collect about inlining algorithm.  */
static int ncalls_inlined;
static int nfunctions_inlined;
static int overall_size;
static gcov_type max_count, max_benefit;

/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;

static inline struct inline_summary *
inline_summary (struct cgraph_node *node)
{
  return &node->local.inline_summary;
}
/* Estimate self time of the function after inlining WHAT into TO.  */

static int
cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  gcov_type time = (((gcov_type)what->global.time
                     - inline_summary (what)->time_inlining_benefit)
                    * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
                    + to->global.time;
  if (time < 0)
    time = 0;
  if (time > MAX_TIME)
    time = MAX_TIME;
  return time;
}
/* Estimate self size of the function after inlining WHAT into TO.  */

static inline int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
                                     struct cgraph_node *what)
{
  int size = ((what->global.size - inline_summary (what)->size_inlining_benefit)
              * times + to->global.size);
  gcc_assert (size >= 0);
  return size;
}
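
/* Worked example for the two estimators above (illustrative values, not
   measured): if WHAT has global.time 1000 with time_inlining_benefit 200,
   inlining it at a call site of frequency CGRAPH_FREQ_BASE / 2 adds
   roughly (1000 - 200) / 2 = 400 units to TO's time.  Likewise, if WHAT
   has global.size 20 with size_inlining_benefit 5, inlining it TIMES == 2
   into a TO of size 50 yields (20 - 5) * 2 + 50 = 80.  */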
/* Scale frequency of NODE edges by FREQ_SCALE and increase loop nest
   by NEST.  */

static void
update_noncloned_frequencies (struct cgraph_node *node,
                              int freq_scale, int nest)
{
  struct cgraph_edge *e;

  /* We do not want to ignore high loop nest after freq drops to 0.  */
  if (!freq_scale)
    freq_scale = 1;
  for (e = node->callees; e; e = e->next_callee)
    {
      e->loop_nest += nest;
      e->frequency = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
      if (e->frequency > CGRAPH_FREQ_MAX)
        e->frequency = CGRAPH_FREQ_MAX;
      if (!e->inline_failed)
        update_noncloned_frequencies (e->callee, freq_scale, nest);
    }
}
/* E is expected to be an edge being inlined.  Clone destination node of
   the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating new
   clones or re-using node originally representing out-of-line function
   call.  */

void
cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
                            bool update_original)
{
  HOST_WIDE_INT peak;

  if (duplicate)
    {
      /* We may eliminate the need for out-of-line copy to be output.
         In that case just go ahead and re-use it.  */
      if (!e->callee->callers->next_caller
          && cgraph_can_remove_if_no_direct_calls_p (e->callee)
          /* Don't reuse if more than one function shares a comdat group.
             If the other function(s) are needed, we need to emit even
             this function out of line.  */
          && !e->callee->same_comdat_group
          && !cgraph_new_nodes)
        {
          gcc_assert (!e->callee->global.inlined_to);
          if (e->callee->analyzed)
            {
              overall_size -= e->callee->global.size;
              nfunctions_inlined++;
            }
          duplicate = false;
          e->callee->local.externally_visible = false;
          update_noncloned_frequencies (e->callee, e->frequency, e->loop_nest);
        }
      else
        {
          struct cgraph_node *n;
          n = cgraph_clone_node (e->callee, e->callee->decl,
                                 e->count, e->frequency, e->loop_nest,
                                 update_original, NULL);
          cgraph_redirect_edge_callee (e, n);
        }
    }

  if (e->caller->global.inlined_to)
    e->callee->global.inlined_to = e->caller->global.inlined_to;
  else
    e->callee->global.inlined_to = e->caller;
  e->callee->global.stack_frame_offset
    = e->caller->global.stack_frame_offset
      + inline_summary (e->caller)->estimated_self_stack_size;
  peak = e->callee->global.stack_frame_offset
      + inline_summary (e->callee)->estimated_self_stack_size;
  if (e->callee->global.inlined_to->global.estimated_stack_size < peak)
    e->callee->global.inlined_to->global.estimated_stack_size = peak;
  cgraph_propagate_frequency (e->callee);

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, duplicate, update_original);
}
/* Mark edge E as inlined and update callgraph accordingly.  UPDATE_ORIGINAL
   specify whether profile of original function should be updated.  If any new
   indirect edges are discovered in the process, add them to NEW_EDGES, unless
   it is NULL.  Return true iff any new callgraph edges were discovered as a
   result of inlining.  */

static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
                         VEC (cgraph_edge_p, heap) **new_edges)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL, *what;
  struct cgraph_edge *curr = e;
  int freq;

  gcc_assert (e->inline_failed);
  e->inline_failed = CIF_OK;
  DECL_POSSIBLY_INLINED (e->callee->decl) = true;

  cgraph_clone_inlined_nodes (e, true, update_original);

  what = e->callee;

  freq = e->frequency;
  /* Now update size of caller and all functions caller is inlined into.  */
  for (;e && !e->inline_failed; e = e->caller->callers)
    {
      to = e->caller;
      old_size = e->caller->global.size;
      new_size = cgraph_estimate_size_after_inlining (1, to, what);
      to->global.size = new_size;
      to->global.time = cgraph_estimate_time_after_inlining (freq, to, what);
    }
  gcc_assert (what->global.inlined_to == to);
  if (new_size > old_size)
    overall_size += new_size - old_size;
  ncalls_inlined++;

  if (flag_indirect_inlining)
    return ipa_propagate_indirect_call_infos (curr, new_edges);
  else
    return false;
}
/* Mark all calls of EDGE->CALLEE inlined into EDGE->CALLER.  */

static void
cgraph_mark_inline (struct cgraph_edge *edge)
{
  struct cgraph_node *to = edge->caller;
  struct cgraph_node *what = edge->callee;
  struct cgraph_edge *e, *next;

  gcc_assert (!edge->call_stmt_cannot_inline_p);
  /* Look for all calls, mark them inline and clone recursively
     all inlined functions.  */
  for (e = what->callers; e; e = next)
    {
      next = e->next_caller;
      if (e->caller == to && e->inline_failed)
        {
          cgraph_mark_inline_edge (e, true, NULL);
          if (e == edge)
            edge = next;
        }
    }
}
/* Estimate the growth caused by inlining NODE into all callees.  */

static int
cgraph_estimate_growth (struct cgraph_node *node)
{
  int growth = 0;
  struct cgraph_edge *e;
  bool self_recursive = false;

  if (node->global.estimated_growth != INT_MIN)
    return node->global.estimated_growth;

  for (e = node->callers; e; e = e->next_caller)
    {
      if (e->caller == node)
        self_recursive = true;
      if (e->inline_failed)
        growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
                   - e->caller->global.size);
    }

  /* ??? Wrong for non-trivially self recursive functions or cases where
     we decide to not inline for different reasons, but it is not big deal
     as in that case we will keep the body around, but we will also avoid
     some inlining.  */
  if (cgraph_only_called_directly_p (node)
      && !DECL_EXTERNAL (node->decl) && !self_recursive)
    growth -= node->global.size;

  node->global.estimated_growth = growth;
  return growth;
}
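
/* Illustrative example (values assumed for this comment): a NODE of size
   30 with two remaining call sites, each of which would grow its caller
   by 25, gives growth == 50; if NODE is only called directly, is not
   external and is not self recursive, the offline copy can go away, so
   30 is subtracted and the estimate drops to 20.  */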
/* Return false when inlining WHAT into TO is not good idea
   as it would cause too large growth of function bodies.
   When ONE_ONLY is true, assume that only one call site is going
   to be inlined, otherwise figure out how many call sites in
   TO calls WHAT and verify that all can be inlined.  */

static bool
cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
                            cgraph_inline_failed_t *reason, bool one_only)
{
  int times = 0;
  struct cgraph_edge *e;
  int newsize;
  int limit;
  HOST_WIDE_INT stack_size_limit, inlined_stack;

  if (one_only)
    times = 1;
  else
    for (e = to->callees; e; e = e->next_callee)
      if (e->callee == what)
        times++;

  if (to->global.inlined_to)
    to = to->global.inlined_to;

  /* When inlining large function body called once into small function,
     take the inlined function as base for limiting the growth.  */
  if (inline_summary (to)->self_size > inline_summary (what)->self_size)
    limit = inline_summary (to)->self_size;
  else
    limit = inline_summary (what)->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = cgraph_estimate_size_after_inlining (times, to, what);
  if (newsize >= to->global.size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      if (reason)
        *reason = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  stack_size_limit = inline_summary (to)->estimated_self_stack_size;

  stack_size_limit += stack_size_limit * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100;

  inlined_stack = (to->global.stack_frame_offset
                   + inline_summary (to)->estimated_self_stack_size
                   + what->global.estimated_stack_size);
  if (inlined_stack > stack_size_limit
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      if (reason)
        *reason = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}
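
/* Example of the limit computation above (assuming the historical default
   of 100 for PARAM_LARGE_FUNCTION_GROWTH; check the params.def in use):
   with self sizes 400 (TO) and 60 (WHAT), limit = 400 + 400 * 100 / 100
   = 800, so a NEWSIZE above both PARAM_LARGE_FUNCTION_INSNS and 800 fails
   with CIF_LARGE_FUNCTION_GROWTH_LIMIT.  The stack check is analogous,
   driven by PARAM_STACK_FRAME_GROWTH and PARAM_LARGE_STACK_FRAME.  */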
/* Return true when function N is small enough to be inlined.  */

static bool
cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
{
  tree decl = n->decl;

  if (n->local.disregard_inline_limits)
    return true;

  if (!flag_inline_small_functions && !DECL_DECLARED_INLINE_P (decl))
    {
      if (reason)
        *reason = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      return false;
    }

  if (!n->analyzed)
    {
      if (reason)
        *reason = CIF_BODY_NOT_AVAILABLE;
      return false;
    }

  if (DECL_DECLARED_INLINE_P (decl))
    {
      if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
          return false;
        }
    }
  else
    {
      if (n->global.size >= MAX_INLINE_INSNS_AUTO)
        {
          if (reason)
            *reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
          return false;
        }
    }

  return true;
}
/* Return true when inlining WHAT would create recursive inlining.
   We call recursive inlining all cases where same function appears more than
   once in the single recursion nest path in the inline graph.  */

static inline bool
cgraph_recursive_inlining_p (struct cgraph_node *to,
                             struct cgraph_node *what,
                             cgraph_inline_failed_t *reason)
{
  bool recursive;
  if (to->global.inlined_to)
    recursive = what->decl == to->global.inlined_to->decl;
  else
    recursive = what->decl == to->decl;
  /* Marking recursive function inline has sane semantic and thus we should
     not warn on it.  */
  if (recursive && reason)
    *reason = (what->local.disregard_inline_limits
               ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
  return recursive;
}
/* A cost model driving the inlining heuristics in a way so the edges with
   smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of nodes affected are recomputed so the
   metrics may accurately depend on values such as number of inlinable callers
   of the function or function body size.  */

static int
cgraph_edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth =
    (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
     - edge->caller->global.size);

  if (edge->callee->local.disregard_inline_limits)
    return INT_MIN;

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s -> %s\n",
               cgraph_node_name (edge->caller),
               cgraph_node_name (edge->callee));
      fprintf (dump_file, "      growth %i, time %i-%i, size %i-%i\n",
               growth,
               edge->callee->global.time,
               inline_summary (edge->callee)->time_inlining_benefit,
               edge->callee->global.size,
               inline_summary (edge->callee)->size_inlining_benefit);
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN - growth;
      if (dump)
        fprintf (dump_file, "      %i: Growth %i < 0\n", (int) badness,
                 growth);
    }

  /* When profiling is available, base priorities on -(#calls / growth).
     So we optimize for overall number of "executed" inlined calls.  */
  else if (max_count)
    {
      badness =
        ((int)
         ((double) edge->count * INT_MIN / max_count / (max_benefit + 1)) *
         (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
      if (dump)
        {
          fprintf (dump_file,
                   "      %i (relative %f): profile info. Relative count %f"
                   " * Relative benefit %f\n",
                   (int) badness, (double) badness / INT_MIN,
                   (double) edge->count / max_count,
                   (double) (inline_summary (edge->callee)->
                             time_inlining_benefit + 1) / (max_benefit + 1));
        }
    }

  /* When function local profile is available, base priorities on
     growth / frequency, so we optimize for overall frequency of inlined
     calls.  This is not too accurate since while the call might be frequent
     within function, the function itself is infrequent.

     Other objective to optimize for is number of different calls inlined.
     We add the estimated growth after inlining all functions to bias the
     priorities slightly in this direction (so fewer times called functions
     of the same size get priority).  */
  else if (flag_guess_branch_prob)
    {
      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
      int benefitperc;
      int growth_for_all;
      badness = growth * 10000;
      benefitperc =
        MIN (100 * inline_summary (edge->callee)->time_inlining_benefit /
             (edge->callee->global.time + 1) + 1, 100);
      div *= benefitperc;

      /* Decrease badness if call is nested.  */
      /* Compress the range so we don't overflow.  */
      if (div > 10000)
        div = 10000 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      growth_for_all = cgraph_estimate_growth (edge->callee);
      badness += growth_for_all;
      if (badness > INT_MAX)
        badness = INT_MAX;
      if (dump)
        {
          fprintf (dump_file,
                   "      %i: guessed profile. frequency %i, overall growth %i,"
                   " benefit %i%%, divisor %i\n",
                   (int) badness, edge->frequency, growth_for_all, benefitperc, div);
        }
    }
  /* When function local profile is not available or it does not give
     useful information (ie frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for overall number
     of functions fully inlined in program.  */
  else
    {
      int nest = MIN (edge->loop_nest, 8);
      badness = cgraph_estimate_growth (edge->callee) * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
      if (dump)
        fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
                 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
    return badness + 1;
  else
    return badness;
}
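
/* A worked pass through the flag_guess_branch_prob branch above
   (illustrative numbers, not from a testcase): growth == 10 gives
   badness = 100000; a call executed once per invocation (frequency ==
   CGRAPH_FREQ_BASE) gives div = 101; with time_inlining_benefit 50 and
   callee time 99, benefitperc = MIN (100 * 50 / 100 + 1, 100) = 51, so
   div = 101 * 51 = 5151 and badness = 100000 / 5151 = 19 before the
   overall-growth bias is added.  Smaller badness wins, so frequent calls
   with high benefit sort toward the front of the heap.  */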
/* Recompute badness of EDGE and update its key in HEAP if needed.  */
static void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = cgraph_edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key can only decrease the keys.
         When we increase the key we do not update the heap
         and instead re-insert the element once it becomes
         a minimum of the heap.  */
      if (badness < n->key)
        {
          fibheap_replace_key (heap, n, badness);
          gcc_checking_assert (n->key == badness);
        }
    }
  else
    edge->aux = fibheap_insert (heap, badness, edge);
}
/* Recompute heap nodes for each of caller edge.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;

  if (!node->local.inlinable
      || node->global.inlined_to)
    return;
  if (bitmap_bit_p (updated_nodes, node->uid))
    return;
  bitmap_set_bit (updated_nodes, node->uid);
  node->global.estimated_growth = INT_MIN;

  /* See if there is something to do.  */
  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      break;
  if (!edge)
    return;
  /* Prune out edges we won't inline into anymore.  */
  if (!cgraph_default_inline_p (node, &failed_reason))
    {
      for (; edge; edge = edge->next_caller)
        if (edge->aux)
          {
            fibheap_delete_node (heap, (fibnode_t) edge->aux);
            edge->aux = NULL;
            if (edge->inline_failed)
              edge->inline_failed = failed_reason;
          }
      return;
    }

  for (; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      update_edge_key (heap, edge);
}
/* Recompute heap nodes for each uninlined call.
   This is used when we know that edge badnesses are going only to increase
   (we introduced new call site) and thus all we need is to insert newly
   created edges into heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
                    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed
            && e->callee->local.inlinable
            && !bitmap_bit_p (updated_nodes, e->callee->uid))
          {
            node->global.estimated_growth = INT_MIN;
            /* If function becomes uninlinable, we need to remove it from
               the heap.  */
            if (!cgraph_default_inline_p (e->callee, &e->inline_failed))
              update_caller_keys (heap, e->callee, updated_nodes);
            else
              /* Otherwise update just edge E.  */
              update_edge_key (heap, e);
          }
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}
/* Recompute heap nodes for each of caller edges of each of callees.
   Walk recursively into all inline clones.  */

static void
update_all_callee_keys (fibheap_t heap, struct cgraph_node *node,
                        bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;
  node->global.estimated_growth = INT_MIN;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
        if (e->inline_failed)
          update_caller_keys (heap, e->callee, updated_nodes);
        if (e->next_callee)
          e = e->next_callee;
        else
          {
            do
              {
                if (e->caller == node)
                  return;
                e = e->caller->callers;
              }
            while (!e->next_callee);
            e = e->next_callee;
          }
      }
}
/* Enqueue all recursive calls from NODE into priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
                        fibheap_t heap)
{
  static int priority;
  struct cgraph_edge *e;
  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node)
      {
        /* When profile feedback is available, prioritize by expected number
           of calls.  Without profile feedback we maintain simple queue
           to order candidates via recursive depths.  */
        fibheap_insert (heap,
                        !max_count ? priority++
                        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
                        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}
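
/* Note on the key above (behavior summarized, constants as in the code):
   with profile feedback the edge count is divided by
   (max_count + 2^24 - 1) / 2^24, scaling the hottest edge to roughly
   -(2^24) so the negated keys stay well within the fibheap key range;
   without feedback the static PRIORITY counter simply preserves discovery
   order, so shallower recursion is inlined first.  */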
/* Decide on recursive inlining: in the case function has recursive calls,
   inline until body size reaches given argument.  If any new indirect edges
   are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
   is NULL.  */

static bool
cgraph_decide_recursive_inlining (struct cgraph_node *node,
                                  VEC (cgraph_edge_p, heap) **new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
  int probability = PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY);
  fibheap_t heap;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone, *next;
  int depth = 0;
  int n = 0;

  /* It does not make sense to recursively inline always-inline functions
     as we are going to sorry() on the remaining calls anyway.  */
  if (node->local.disregard_inline_limits
      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (node->decl)))
    return false;

  if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (node->decl))
      || (!flag_inline_functions && !DECL_DECLARED_INLINE_P (node->decl)))
    return false;

  if (DECL_DECLARED_INLINE_P (node->decl))
    {
      limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
      max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
    }

  /* Make sure that function is small enough to be considered for inlining.  */
  if (!max_depth
      || cgraph_estimate_size_after_inlining (1, node, node) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
             "  Performing recursive inlining on %s\n",
             cgraph_node_name (node));

  /* We need original clone to copy around.  */
  master_clone = cgraph_clone_node (node, node->decl,
                                    node->count, CGRAPH_FREQ_BASE, 1,
                                    false, NULL);
  master_clone->needed = true;
  for (e = master_clone->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      cgraph_clone_inlined_nodes (e, true, false);

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap)
         && (cgraph_estimate_size_after_inlining (1, node, master_clone)
             <= limit))
    {
      struct cgraph_edge *curr
        = (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode;

      depth = 1;
      for (cnode = curr->caller;
           cnode->global.inlined_to; cnode = cnode->callers->caller)
        if (node->decl == curr->callee->decl)
          depth++;
      if (depth > max_depth)
        {
          if (dump_file)
            fprintf (dump_file,
                     "   maximal depth reached\n");
          continue;
        }

      if (max_count)
        {
          if (!cgraph_maybe_hot_edge_p (curr))
            {
              if (dump_file)
                fprintf (dump_file, "   Not inlining cold call\n");
              continue;
            }
          if (curr->count * 100 / node->count < probability)
            {
              if (dump_file)
                fprintf (dump_file,
                         "   Probability of edge is too small\n");
              continue;
            }
        }

      if (dump_file)
        {
          fprintf (dump_file,
                   "   Inlining call of depth %i", depth);
          if (node->count)
            fprintf (dump_file, " called approx. %.2f times per call",
                     (double)curr->count / node->count);
          fprintf (dump_file, "\n");
        }
      cgraph_redirect_edge_callee (curr, master_clone);
      cgraph_mark_inline_edge (curr, false, new_edges);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }
  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");

  fibheap_delete (heap);
  if (dump_file)
    fprintf (dump_file,
             "\n   Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
             master_clone->global.size, node->global.size,
             master_clone->global.time, node->global.time);

  /* Remove master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone so we don't need recursion.  */
  for (node = cgraph_nodes; node != master_clone;
       node = next)
    {
      next = node->next;
      if (node->global.inlined_to == master_clone)
        cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  /* FIXME: Recursive inlining actually reduces number of calls of the
     function.  At this place we should probably walk the function and
     inline clones and compensate the counts accordingly.  This probably
     doesn't matter much in practice.  */
  return n > 0;
}
/* Set inline_failed for all callers of given function to REASON.  */

static void
cgraph_set_inline_failed (struct cgraph_node *node,
                          cgraph_inline_failed_t reason)
{
  struct cgraph_edge *e;

  if (dump_file)
    fprintf (dump_file, "Inlining failed: %s\n",
             cgraph_inline_failed_string (reason));
  for (e = node->callers; e; e = e->next_caller)
    if (e->inline_failed)
      e->inline_failed = reason;
}
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */
static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
          * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
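
/* Example (assuming conventional defaults of 10000 for
   PARAM_LARGE_UNIT_INSNS and 30 for PARAM_INLINE_UNIT_GROWTH; check the
   params.def in use): a unit of 20000 insns may grow to
   20000 * 130 / 100 = 26000, while a tiny unit is first rounded up to
   10000 so that small programs still get reasonable inlining headroom.  */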
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */
static void
add_new_edges_to_heap (fibheap_t heap, VEC (cgraph_edge_p, heap) *new_edges)
{
  while (VEC_length (cgraph_edge_p, new_edges) > 0)
    {
      struct cgraph_edge *edge = VEC_pop (cgraph_edge_p, new_edges);

      gcc_assert (!edge->aux);
      if (edge->callee->local.inlinable
          && cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
    }
}
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap based on the
   estimated growth of the overall number of instructions, and the
   estimates are updated after each inlining.

   INLINED and INLINED_CALLEES are just pointers to arrays large enough
   to be passed to cgraph_inlined_into and cgraph_inlined_callees.  */

static void
cgraph_decide_inlining_of_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  cgraph_inline_failed_t failed_reason;
  fibheap_t heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;

  if (flag_indirect_inlining)
    new_indirect_edges = VEC_alloc (cgraph_edge_p, heap, 8);

  if (dump_file)
    fprintf (dump_file, "\nDeciding on smaller functions:\n");

  /* Put all inline candidates into the heap.  */

  for (node = cgraph_nodes; node; node = node->next)
    {
      if (!node->local.inlinable || !node->callers)
        continue;
      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n", cgraph_node_name (node));

      node->global.estimated_growth = INT_MIN;
      if (!cgraph_default_inline_p (node, &failed_reason))
        {
          cgraph_set_inline_failed (node, failed_reason);
          continue;
        }

      for (edge = node->callers; edge; edge = edge->next_caller)
        if (edge->inline_failed)
          {
            gcc_assert (!edge->aux);
            edge->aux = fibheap_insert (heap, cgraph_edge_badness (edge, false), edge);
          }
    }

  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  while (overall_size <= max_size
         && !fibheap_empty (heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (heap);
      int current_badness;
      int growth;
      cgraph_inline_failed_t not_good = CIF_OK;

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;

      /* When updating the edge costs, we only decrease badness in the keys.
         When the badness increases, we keep the heap as it is and re-insert
         the key now.  */
      current_badness = cgraph_edge_badness (edge, false);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
        {
          edge->aux = fibheap_insert (heap, current_badness, edge);
          continue;
        }

      callee = edge->callee;

      growth = (cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee)
                - edge->caller->global.size);

      if (dump_file)
        {
          fprintf (dump_file,
                   "\nConsidering %s with %i size\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.size);
          fprintf (dump_file,
                   " to be inlined into %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   flag_wpa ? "unknown"
                   : gimple_filename ((const_gimple) edge->call_stmt),
                   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   badness,
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
          if (dump_flags & TDF_DETAILS)
            cgraph_edge_badness (edge, true);
        }

      /* When not having profile info ready we don't weight by any way the
         position of call in procedure itself.  This means if call of
         function A from function B seems profitable to inline, the recursive
         call of function A in inline copy of A in B will look profitable too
         and we end up inlining until reaching maximal function growth.  This
         is not good idea so prohibit the recursive inlining.

         ??? When the frequencies are taken into account we might not need this
         restriction.

         We need to be careful here, in some testcases, e.g. directives.c in
         libcpp, we can estimate self recursive function to have negative growth
         for inlining completely.  */
      if (!edge->count)
        {
          where = edge->caller;
          while (where->global.inlined_to)
            {
              if (where->decl == edge->callee->decl)
                break;
              where = where->callers->caller;
            }
          if (where->global.inlined_to)
            {
              edge->inline_failed
                = (edge->callee->local.disregard_inline_limits
                   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
              if (dump_file)
                fprintf (dump_file, " inline_failed:Recursive inlining performed only for function itself.\n");
              continue;
            }
        }

      if (edge->callee->local.disregard_inline_limits)
        ;
      else if (!cgraph_maybe_hot_edge_p (edge))
        not_good = CIF_UNLIKELY_CALL;
      else if (!flag_inline_functions
               && !DECL_DECLARED_INLINE_P (edge->callee->decl))
        not_good = CIF_NOT_DECLARED_INLINED;
      else if (optimize_function_for_size_p (DECL_STRUCT_FUNCTION (edge->caller->decl)))
        not_good = CIF_OPTIMIZING_FOR_SIZE;
      if (not_good && growth > 0 && cgraph_estimate_growth (edge->callee) > 0)
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              edge->inline_failed = not_good;
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!cgraph_default_inline_p (edge->callee, &edge->inline_failed))
        {
          if (!cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                            &edge->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, " inline_failed:%s.\n",
                         cgraph_inline_failed_string (edge->inline_failed));
            }
          continue;
        }
      if (!tree_can_inline_p (edge))
        {
          if (dump_file)
            fprintf (dump_file, " inline_failed:%s.\n",
                     cgraph_inline_failed_string (edge->inline_failed));
          continue;
        }
      if (cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                       &edge->inline_failed))
        {
          where = edge->caller;
          if (where->global.inlined_to)
            where = where->global.inlined_to;
          if (!cgraph_decide_recursive_inlining (where,
                                                 flag_indirect_inlining
                                                 ? &new_indirect_edges : NULL))
            continue;
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);
          update_all_callee_keys (heap, where, updated_nodes);
        }
      else
        {
          struct cgraph_node *callee;
          if (edge->call_stmt_cannot_inline_p
              || !cgraph_check_inline_limits (edge->caller, edge->callee,
                                              &edge->inline_failed, true))
            {
              if (dump_file)
                fprintf (dump_file, " Not inlining into %s:%s.\n",
                         cgraph_node_name (edge->caller),
                         cgraph_inline_failed_string (edge->inline_failed));
              continue;
            }
          callee = edge->callee;
          gcc_checking_assert (!callee->global.inlined_to);
          cgraph_mark_inline_edge (edge, true, &new_indirect_edges);
          if (flag_indirect_inlining)
            add_new_edges_to_heap (heap, new_indirect_edges);

          /* We inlined last offline copy to the body.  This might lead
             to callees of function having fewer call sites and thus they
             may need updating.  */
          if (callee->global.inlined_to)
            update_all_callee_keys (heap, callee, updated_nodes);
          else
            update_callee_keys (heap, edge->callee, updated_nodes);
        }
      where = edge->caller;
      if (where->global.inlined_to)
        where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
         such as number of inlinable calls and size of the function body.
         After inlining these properties might change for the function we
         inlined into (since its body size changed) and for the functions
         called by the function we inlined (since the number of their
         inlinable callers might change).  */
      update_caller_keys (heap, where, updated_nodes);

      /* We removed one call of the function we just inlined.  If offline
         copy is still needed, be sure to update the keys.  */
      if (callee != where && !callee->global.inlined_to)
        update_caller_keys (heap, callee, updated_nodes);
      bitmap_clear (updated_nodes);

      if (dump_file)
        {
          fprintf (dump_file,
                   " Inlined into %s which now has time %i and size %i,"
                   " net change of %+i.\n",
                   cgraph_node_name (edge->caller),
                   edge->caller->global.time,
                   edge->caller->global.size,
                   overall_size - old_size);
        }
      if (min_size > overall_size)
        {
          min_size = overall_size;
          max_size = compute_max_insns (min_size);

          if (dump_file)
            fprintf (dump_file, "New minimal size reached: %i\n", min_size);
        }
    }
  while (!fibheap_empty (heap))
    {
      int badness = fibheap_min_key (heap);

      edge = (struct cgraph_edge *) fibheap_extract_min (heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
        continue;
#ifdef ENABLE_CHECKING
      gcc_assert (cgraph_edge_badness (edge, false) >= badness);
#endif
      if (dump_file)
        {
          fprintf (dump_file,
                   "\nSkipping %s with %i size\n",
                   cgraph_node_name (edge->callee),
                   edge->callee->global.size);
          fprintf (dump_file,
                   " called by %s in %s:%i\n"
                   " Estimated growth after inlined into all callees is %+i insns.\n"
                   " Estimated badness is %i, frequency %.2f.\n",
                   cgraph_node_name (edge->caller),
                   flag_wpa ? "unknown"
                   : gimple_filename ((const_gimple) edge->call_stmt),
                   flag_wpa ? -1 : gimple_lineno ((const_gimple) edge->call_stmt),
                   cgraph_estimate_growth (edge->callee),
                   badness,
                   edge->frequency / (double)CGRAPH_FREQ_BASE);
          if (edge->count)
            fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
          if (dump_flags & TDF_DETAILS)
            cgraph_edge_badness (edge, true);
        }
      if (!edge->callee->local.disregard_inline_limits && edge->inline_failed
          && !cgraph_recursive_inlining_p (edge->caller, edge->callee,
                                           &edge->inline_failed))
        edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
    }

  if (new_indirect_edges)
    VEC_free (cgraph_edge_p, heap, new_indirect_edges);
  fibheap_delete (heap);
  BITMAP_FREE (updated_nodes);
}
/* Flatten NODE from the IPA inliner.  */

static void
cgraph_flatten (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *)(size_t) INLINE_ALL;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;

      if (e->call_stmt_cannot_inline_p)
        continue;

      if (!e->callee->analyzed)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Not inlining: Function body not available.\n");
          continue;
        }

      /* We've hit cycle?  It is time to give up.  */
      if (e->callee->aux)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Not inlining %s into %s to avoid cycle.\n",
                     cgraph_node_name (e->callee),
                     cgraph_node_name (e->caller));
          e->inline_failed = CIF_RECURSIVE_INLINING;
          continue;
        }

      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
        {
          cgraph_flatten (e->callee);
          continue;
        }

      if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
        {
          if (dump_file)
            fprintf (dump_file, "Not inlining: recursive call.\n");
          continue;
        }

      if (!tree_can_inline_p (e))
        {
          if (dump_file)
            fprintf (dump_file, "Not inlining: %s",
                     cgraph_inline_failed_string (e->inline_failed));
          continue;
        }

      /* Inline the edge and flatten the inline clone.  Avoid
         recursing through the original node if the node was cloned.  */
      if (dump_file)
        fprintf (dump_file, " Inlining %s into %s.\n",
                 cgraph_node_name (e->callee),
                 cgraph_node_name (e->caller));
      orig_callee = e->callee;
      cgraph_mark_inline_edge (e, true, NULL);
      if (e->callee != orig_callee)
        orig_callee->aux = (void *)(size_t) INLINE_ALL;
      cgraph_flatten (e->callee);
      if (e->callee != orig_callee)
        orig_callee->aux = NULL;
    }

  node->aux = NULL;
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
cgraph_decide_inlining (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int old_size = 0;
  int i;
  int initial_size = 0;

  cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
  if (in_lto_p && flag_indirect_inlining)
    ipa_update_after_lto_read ();
  if (flag_indirect_inlining)
    ipa_create_all_structures_for_iinln ();

  max_count = 0;
  max_benefit = 0;
  for (node = cgraph_nodes; node; node = node->next)
    if (node->analyzed)
      {
        struct cgraph_edge *e;

        gcc_assert (inline_summary (node)->self_size == node->global.size);
        initial_size += node->global.size;
        for (e = node->callees; e; e = e->next_callee)
          if (max_count < e->count)
            max_count = e->count;
        if (max_benefit < inline_summary (node)->time_inlining_benefit)
          max_benefit = inline_summary (node)->time_inlining_benefit;
      }
  gcc_assert (in_lto_p
              || !max_count
              || (profile_info && flag_branch_probabilities));
  overall_size = initial_size;

  nnodes = cgraph_postorder (order);

  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on inlining.  Starting with size %i.\n",
             initial_size);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened, but don't update overall unit
         size.  Calling the incremental inliner here is lame,
         a simple worklist should be enough.  What should be left
         here from the early inliner (if it runs) is cyclic cases.
         Ideally when processing callees we stop inlining at the
         entry of cycles, possibly cloning that entry point and
         try to flatten itself turning it into a self-recursive
         function.  */
      if (lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_flatten (node);
        }
    }

  cgraph_decide_inlining_of_small_functions ();

  if (flag_inline_functions_called_once)
    {
      if (dump_file)
        fprintf (dump_file, "\nDeciding on functions called once:\n");

      /* And finally decide what functions are called once.  */
      for (i = nnodes - 1; i >= 0; i--)
        {
          node = order[i];

          if (node->callers
              && !node->callers->next_caller
              && cgraph_only_called_directly_p (node)
              && node->local.inlinable
              && node->callers->inline_failed
              && node->callers->caller != node
              && node->callers->caller->global.inlined_to != node
              && !node->callers->call_stmt_cannot_inline_p
              && !DECL_EXTERNAL (node->decl)
              && !DECL_COMDAT (node->decl))
            {
              cgraph_inline_failed_t reason;
              old_size = overall_size;
              if (dump_file)
                {
                  fprintf (dump_file,
                           "\nConsidering %s size %i.\n",
                           cgraph_node_name (node), node->global.size);
                  fprintf (dump_file,
                           " Called once from %s %i insns.\n",
                           cgraph_node_name (node->callers->caller),
                           node->callers->caller->global.size);
                }

              if (cgraph_check_inline_limits (node->callers->caller, node,
                                              &reason, false))
                {
                  struct cgraph_node *caller = node->callers->caller;
                  cgraph_mark_inline (node->callers);
                  if (dump_file)
                    fprintf (dump_file,
                             " Inlined into %s which now has %i size"
                             " for a net change of %+i size.\n",
                             cgraph_node_name (caller),
                             caller->global.size,
                             overall_size - old_size);
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file,
                             " Not inlining: %s.\n",
                             cgraph_inline_failed_string (reason));
                }
            }
        }
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (flag_indirect_inlining)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
             "\nInlined %i calls, eliminated %i functions, "
             "size %i turned to %i size.\n\n",
             ncalls_inlined, nfunctions_inlined, initial_size,
             overall_size);
  free (order);
  return 0;
}
/* Return true when N is leaf function.  Accept cheap (pure&const) builtins
   in leaf functions.  */
static bool
leaf_node_p (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  for (e = n->callees; e; e = e->next_callee)
    if (!DECL_BUILT_IN (e->callee->decl)
        || (!TREE_READONLY (e->callee->decl)
            || DECL_PURE_P (e->callee->decl)))
      return false;
  return true;
}
/* Incrementally decide on inlining in NODE, in the given MODE.  This is
   the per-function inliner used by the early inlining pass.  */

static bool
cgraph_decide_inlining_incrementally (struct cgraph_node *node,
                                      enum inlining_mode mode)
{
  struct cgraph_edge *e;
  bool inlined = false;
  cgraph_inline_failed_t failed_reason;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  if (mode != INLINE_ALWAYS_INLINE && mode != INLINE_SIZE_NORECURSIVE
      && lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      if (dump_file)
        fprintf (dump_file, "Incrementally flattening %s\n",
                 cgraph_node_name (node));
      mode = INLINE_ALL;
    }

  /* First of all look for always inline functions.  */
  if (mode != INLINE_SIZE_NORECURSIVE)
    for (e = node->callees; e; e = e->next_callee)
      {
        if (!e->callee->local.disregard_inline_limits
            && (mode != INLINE_ALL || !e->callee->local.inlinable))
          continue;
        if (e->call_stmt_cannot_inline_p)
          continue;
        if (dump_file)
          fprintf (dump_file,
                   "Considering to always inline inline candidate %s.\n",
                   cgraph_node_name (e->callee));
        if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
          {
            if (dump_file)
              fprintf (dump_file, "Not inlining: recursive call.\n");
            continue;
          }
        if (!tree_can_inline_p (e))
          {
            if (dump_file)
              fprintf (dump_file,
                       "Not inlining: %s",
                       cgraph_inline_failed_string (e->inline_failed));
            continue;
          }
        if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
            != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
          {
            if (dump_file)
              fprintf (dump_file, "Not inlining: SSA form does not match.\n");
            continue;
          }
        if (!e->callee->analyzed)
          {
            if (dump_file)
              fprintf (dump_file,
                       "Not inlining: Function body no longer available.\n");
            continue;
          }

        if (dump_file)
          fprintf (dump_file, " Inlining %s into %s.\n",
                   cgraph_node_name (e->callee),
                   cgraph_node_name (e->caller));
        cgraph_mark_inline (e);
        inlined = true;
      }

  /* Now do the automatic inlining.  */
  if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE
      /* Never inline regular functions into always-inline functions
         during incremental inlining.  */
      && !node->local.disregard_inline_limits)
    {
      bitmap visited = BITMAP_ALLOC (NULL);
      for (e = node->callees; e; e = e->next_callee)
        {
          int allowed_growth = 0;
          if (!e->callee->local.inlinable
              || !e->inline_failed
              || e->callee->local.disregard_inline_limits)
            continue;
          /* We are inlining a function to all call-sites in node
             or to none.  So visit each candidate only once.  */
          if (!bitmap_set_bit (visited, e->callee->uid))
            continue;
          if (dump_file)
            fprintf (dump_file, "Considering inline candidate %s.\n",
                     cgraph_node_name (e->callee));
          if (cgraph_recursive_inlining_p (node, e->callee, &e->inline_failed))
            {
              if (dump_file)
                fprintf (dump_file, "Not inlining: recursive call.\n");
              continue;
            }
          if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
              != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->callee->decl)))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: SSA form does not match.\n");
              continue;
            }

          if (cgraph_maybe_hot_edge_p (e) && leaf_node_p (e->callee)
              && optimize_function_for_speed_p (cfun))
            allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);

          /* When the function body would grow and inlining the function
             won't eliminate the need for offline copy of the function,
             don't inline.  */
          if (((mode == INLINE_SIZE || mode == INLINE_SIZE_NORECURSIVE)
               || (!flag_inline_functions
                   && !DECL_DECLARED_INLINE_P (e->callee->decl)))
              && (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
                  > e->caller->global.size + allowed_growth)
              && cgraph_estimate_growth (e->callee) > allowed_growth)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: code size would grow by %i.\n",
                         cgraph_estimate_size_after_inlining (1, e->caller,
                                                              e->callee)
                         - e->caller->global.size);
              continue;
            }
          if (!cgraph_check_inline_limits (node, e->callee, &e->inline_failed,
                                           false)
              || e->call_stmt_cannot_inline_p)
            {
              if (dump_file)
                fprintf (dump_file, "Not inlining: %s.\n",
                         cgraph_inline_failed_string (e->inline_failed));
              continue;
            }
          if (!e->callee->analyzed)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: Function body no longer available.\n");
              continue;
            }
          if (!tree_can_inline_p (e))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Not inlining: %s.",
                         cgraph_inline_failed_string (e->inline_failed));
              continue;
            }
          if (cgraph_default_inline_p (e->callee, &failed_reason))
            {
              if (dump_file)
                fprintf (dump_file, " Inlining %s into %s.\n",
                         cgraph_node_name (e->callee),
                         cgraph_node_name (e->caller));
              cgraph_mark_inline (e);
              inlined = true;
            }
        }
      BITMAP_FREE (visited);
    }
  return inlined;
}
/* Because inlining might remove no-longer reachable nodes, we need to
   keep the array visible to garbage collector to avoid reading collected
   out nodes.  */
static int nnodes;
static GTY ((length ("nnodes"))) struct cgraph_node **order;

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   later real inlining pass for testcases with very many function calls.  */
static unsigned int
cgraph_early_inlining (void)
{
  struct cgraph_node *node = cgraph_node (current_function_decl);
  unsigned int todo = 0;
  int iterations = 0;

  if (seen_error ())
    return 0;

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining)
    {
      /* When not optimizing or not inlining inline only always-inline
         functions.  */
      cgraph_decide_inlining_incrementally (node, INLINE_ALWAYS_INLINE);
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }
  else
    {
      if (lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", cgraph_node_name (node));
          cgraph_flatten (node);
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);
          timevar_pop (TV_INTEGRATION);
        }
      /* We iterate incremental inlining to get trivial cases of indirect
         inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
             && cgraph_decide_inlining_incrementally (node,
                                                      iterations
                                                      ? INLINE_SIZE_NORECURSIVE
                                                      : INLINE_SIZE))
        {
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);
          iterations++;
          timevar_pop (TV_INTEGRATION);
        }
      if (dump_file)
        fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",                            /* name */
  NULL,                                 /* gate */
  cgraph_early_inlining,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};
/* When inlining shall be performed.  */
static bool
cgraph_gate_ipa_early_inlining (void)
{
  return (flag_early_inlining
          && !in_lto_p
          && (flag_branch_probabilities || flag_test_coverage
              || profile_arc_flag));
}

/* IPA pass wrapper for early inlining pass.  We need to run early inlining
   before tree profiling so we have stand alone IPA pass for doing so.  */
struct simple_ipa_opt_pass pass_ipa_early_inline =
{
 {
  SIMPLE_IPA_PASS,
  "einline_ipa",                        /* name */
  cgraph_gate_ipa_early_inlining,       /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_INLINE_HEURISTICS,                 /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_cgraph                      /* todo_flags_finish */
 }
};
1859 /* See if the statement might disappear after inlining.  We are not terribly
1860    sophisticated, basically looking for simple abstraction penalty wrappers.  */
1862 static bool
1863 likely_eliminated_by_inlining_p (gimple stmt)
1865 enum gimple_code code = gimple_code (stmt);
1866 switch (code)
1868 case GIMPLE_RETURN:
1869 return true;
1870 case GIMPLE_ASSIGN:
1871 if (gimple_num_ops (stmt) != 2)
1872 return false;
1874 /* Casts of parameters, loads from parameters passed by reference
1875 and stores to return value or parameters are probably free after
1876 inlining. */
1877 if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
1878 || gimple_assign_rhs_code (stmt) == NOP_EXPR
1879 || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
1880 || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
1882 tree rhs = gimple_assign_rhs1 (stmt);
1883 tree lhs = gimple_assign_lhs (stmt);
1884 tree inner_rhs = rhs;
1885 tree inner_lhs = lhs;
1886 bool rhs_free = false;
1887 bool lhs_free = false;
1889 while (handled_component_p (inner_lhs)
1890 || TREE_CODE (inner_lhs) == MEM_REF)
1891 inner_lhs = TREE_OPERAND (inner_lhs, 0);
1892 while (handled_component_p (inner_rhs)
1893 || TREE_CODE (inner_rhs) == ADDR_EXPR
1894 || TREE_CODE (inner_rhs) == MEM_REF)
1895 inner_rhs = TREE_OPERAND (inner_rhs, 0);
1898 if (TREE_CODE (inner_rhs) == PARM_DECL
1899 || (TREE_CODE (inner_rhs) == SSA_NAME
1900 && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
1901 && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
1902 rhs_free = true;
1903 if (rhs_free && is_gimple_reg (lhs))
1904 lhs_free = true;
1905 if (((TREE_CODE (inner_lhs) == PARM_DECL
1906 || (TREE_CODE (inner_lhs) == SSA_NAME
1907 && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
1908 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
1909 && inner_lhs != lhs)
1910 || TREE_CODE (inner_lhs) == RESULT_DECL
1911 || (TREE_CODE (inner_lhs) == SSA_NAME
1912 && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
1913 lhs_free = true;
1914 if (lhs_free
1915 && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
1916 rhs_free = true;
1917 if (lhs_free && rhs_free)
1918 return true;
1920 return false;
1921 default:
1922 return false;
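
/* Editor's note: a sketch of the kind of abstraction penalty wrapper the
   predicate above is aimed at, as hypothetical user code:

     struct pt { int x, y; };
     static int
     get_x (const struct pt *p)
     {
       return p->x;
     }

   In GIMPLE this is roughly "tmp_1 = p_2(D)->x; return tmp_1;".  The load
   strips down to the default-def SSA name of the PARM_DECL P (rhs_free), its
   LHS is a gimple register (lhs_free), and the GIMPLE_RETURN is free by
   definition, so the whole body counts as inlining benefit: after inlining
   into a caller, nothing of GET_X is expected to remain.  */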
1926 /* Compute function body size parameters for NODE. */
1928 static void
1929 estimate_function_body_sizes (struct cgraph_node *node)
1931 gcov_type time = 0;
1932 gcov_type time_inlining_benefit = 0;
1933 int size = 0;
1934 int size_inlining_benefit = 0;
1935 basic_block bb;
1936 gimple_stmt_iterator bsi;
1937 struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
1938 tree arg;
1939 int freq;
1940 tree funtype = TREE_TYPE (node->decl);
1942 if (dump_file)
1943 fprintf (dump_file, "Analyzing function body size: %s\n",
1944 cgraph_node_name (node));
1946 gcc_assert (my_function && my_function->cfg);
1947 FOR_EACH_BB_FN (bb, my_function)
1949 freq = compute_call_stmt_bb_frequency (node->decl, bb);
1950 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1952 gimple stmt = gsi_stmt (bsi);
1953 int this_size = estimate_num_insns (stmt, &eni_size_weights);
1954 int this_time = estimate_num_insns (stmt, &eni_time_weights);
1956 if (dump_file && (dump_flags & TDF_DETAILS))
1958 fprintf (dump_file, " freq:%6i size:%3i time:%3i ",
1959 freq, this_size, this_time);
1960 print_gimple_stmt (dump_file, stmt, 0, 0);
1962 this_time *= freq;
1963 time += this_time;
1964 size += this_size;
1965 if (likely_eliminated_by_inlining_p (stmt))
1967 size_inlining_benefit += this_size;
1968 time_inlining_benefit += this_time;
1969 if (dump_file && (dump_flags & TDF_DETAILS))
1970 fprintf (dump_file, " Likely eliminated\n");
1972 gcc_assert (time >= 0);
1973 gcc_assert (size >= 0);
1976 time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
1977 time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
1978 / CGRAPH_FREQ_BASE);
1979 if (dump_file)
1980 fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
1981 (int)time, (int)time_inlining_benefit,
1982 size, size_inlining_benefit);
1983 time_inlining_benefit += eni_time_weights.call_cost;
1984 size_inlining_benefit += eni_size_weights.call_cost;
1985 if (!VOID_TYPE_P (TREE_TYPE (funtype)))
1987 int cost = estimate_move_cost (TREE_TYPE (funtype));
1988 time_inlining_benefit += cost;
1989 size_inlining_benefit += cost;
1991 for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
1992 if (!VOID_TYPE_P (TREE_TYPE (arg)))
1994 int cost = estimate_move_cost (TREE_TYPE (arg));
1995 time_inlining_benefit += cost;
1996 size_inlining_benefit += cost;
1998 if (time_inlining_benefit > MAX_TIME)
1999 time_inlining_benefit = MAX_TIME;
2000 if (time > MAX_TIME)
2001 time = MAX_TIME;
2002 inline_summary (node)->self_time = time;
2003 inline_summary (node)->self_size = size;
2004 if (dump_file)
2005 fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
2006 (int)time, (int)time_inlining_benefit,
2007 size, size_inlining_benefit);
2008 inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
2009 inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
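
/* Editor's note: a worked example of the accounting above, assuming the
   usual CGRAPH_FREQ_BASE of 1000.  A statement with estimated size 1 and
   time 2 in a block with freq 1500 (executed 1.5 times per call on average)
   contributes 1 to SIZE and 2 * 1500 = 3000 to the scaled TIME; the final
   rounding division (3000 + 500) / 1000 turns that back into 3 time units.
   If the statement is also likely eliminated by inlining -- say a store to
   the return value -- the same 1 size unit and 3 rounded time units land in
   the *_inlining_benefit counters, which are then topped up with the call
   cost and the move cost of the return value and each argument.  */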
2012 /* Compute parameters of functions used by inliner. */
2013 unsigned int
2014 compute_inline_parameters (struct cgraph_node *node)
2016 HOST_WIDE_INT self_stack_size;
2018 gcc_assert (!node->global.inlined_to);
2020 /* Estimate the stack size for the function. But not at -O0
2021 because estimated_stack_frame_size is a quadratic problem. */
2022 self_stack_size = optimize ? estimated_stack_frame_size () : 0;
2023 inline_summary (node)->estimated_self_stack_size = self_stack_size;
2024 node->global.estimated_stack_size = self_stack_size;
2025 node->global.stack_frame_offset = 0;
2027 /* Can this function be inlined at all? */
2028 node->local.inlinable = tree_inlinable_function_p (node->decl);
2029 if (node->local.inlinable && !node->local.disregard_inline_limits)
2030 node->local.disregard_inline_limits
2031 = DECL_DISREGARD_INLINE_LIMITS (node->decl);
2032 estimate_function_body_sizes (node);
2033 /* Inlining characteristics are maintained by cgraph_mark_inline.  */
2034 node->global.time = inline_summary (node)->self_time;
2035 node->global.size = inline_summary (node)->self_size;
2036 return 0;
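
/* Editor's note: DECL_DISREGARD_INLINE_LIMITS is set, among other things,
   for functions carrying the always_inline attribute, e.g.

     static inline int twice (int x) __attribute__ ((always_inline));
     static inline int twice (int x) { return 2 * x; }

   For such a node the code above turns local.disregard_inline_limits on, so
   the size and growth limits checked elsewhere in this file are skipped and
   the function is inlined wherever that is at all possible.  */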
2040 /* Compute parameters of functions used by inliner using
2041 current_function_decl. */
2042 static unsigned int
2043 compute_inline_parameters_for_current (void)
2045 compute_inline_parameters (cgraph_node (current_function_decl));
2046 return 0;
2049 struct gimple_opt_pass pass_inline_parameters =
2052 GIMPLE_PASS,
2053 "inline_param", /* name */
2054 NULL, /* gate */
2055 compute_inline_parameters_for_current,/* execute */
2056 NULL, /* sub */
2057 NULL, /* next */
2058 0, /* static_pass_number */
2059 TV_INLINE_HEURISTICS, /* tv_id */
2060 0, /* properties_required */
2061 0, /* properties_provided */
2062 0, /* properties_destroyed */
2063 0, /* todo_flags_start */
2064 0 /* todo_flags_finish */
2068 /* This function performs the intraprocedural analysis in NODE that is
2069    required to inline indirect calls.  */
2070 static void
2071 inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
2073 ipa_analyze_node (node);
2074 if (dump_file && (dump_flags & TDF_DETAILS))
2076 ipa_print_node_params (dump_file, node);
2077 ipa_print_node_jump_functions (dump_file, node);
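
/* Editor's note: a sketch of what the jump-function analysis above enables,
   as hypothetical user code:

     static int square (int x) { return x * x; }
     static int apply (int (*f) (int), int x) { return f (x); }
     int use (int y) { return apply (square, y); }

   ipa_analyze_node records that APPLY's parameter F receives the constant
   SQUARE from USE; once APPLY is inlined into USE, the formerly indirect
   call F (X) becomes a direct call to SQUARE, which the iterated early
   inliner (and -findirect-inlining at the IPA level) can then inline in
   turn.  */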
2081 /* Analyze the function body, noting its size and other inline parameters.  */
2082 static void
2083 analyze_function (struct cgraph_node *node)
2085 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
2086 current_function_decl = node->decl;
2088 compute_inline_parameters (node);
2089 if (flag_indirect_inlining)
2090 inline_indirect_intraprocedural_analysis (node);
2092 current_function_decl = NULL;
2093 pop_cfun ();
2096 /* Called when a new function is inserted into the callgraph late.  */
2097 static void
2098 add_new_function (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
2100 analyze_function (node);
2103 /* Generate inline summaries for all analyzed functions in the callgraph.  */
2104 static void
2105 inline_generate_summary (void)
2107 struct cgraph_node *node;
2109 function_insertion_hook_holder =
2110 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2112 if (flag_indirect_inlining)
2114 ipa_register_cgraph_hooks ();
2115 ipa_check_create_node_params ();
2116 ipa_check_create_edge_args ();
2119 for (node = cgraph_nodes; node; node = node->next)
2120 if (node->analyzed)
2121 analyze_function (node);
2123 return;
2126 /* Apply inline plan to function. */
2127 static unsigned int
2128 inline_transform (struct cgraph_node *node)
2130 unsigned int todo = 0;
2131 struct cgraph_edge *e;
2132 bool inline_p = false;
2134 /* FIXME: Currently the pass manager is adding the inline transform more than
2135    once to some clones.  This needs revisiting after WPA cleanups.  */
2136 if (cfun->after_inlining)
2137 return 0;
2139 /* We might need the body of this function so that we can expand
2140 it inline somewhere else. */
2141 if (cgraph_preserve_function_body_p (node->decl))
2142 save_inline_function_body (node);
2144 for (e = node->callees; e; e = e->next_callee)
2146 cgraph_redirect_edge_call_stmt_to_callee (e);
2147 if (!e->inline_failed || warn_inline)
2148 inline_p = true;
2151 if (inline_p)
2153 timevar_push (TV_INTEGRATION);
2154 todo = optimize_inline_calls (current_function_decl);
2155 timevar_pop (TV_INTEGRATION);
2157 cfun->always_inline_functions_inlined = true;
2158 cfun->after_inlining = true;
2159 return todo | execute_fixup_cfg ();
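
/* Editor's note: the WARN_INLINE case above means the integrator is run even
   when nothing was inlined into this function, so that -Winline diagnostics
   about calls that could not be inlined are still emitted, e.g.

     gcc -O2 -Winline foo.c

   warns for each call to a function declared inline that the inliner
   nevertheless refused.  */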
2162 /* Read inline summary.  Jump functions are shared among ipa-cp
2163    and inliner, so when ipa-cp is active, we don't need to read them
2164    twice.  */
2166 static void
2167 inline_read_summary (void)
2169 if (flag_indirect_inlining)
2171 ipa_register_cgraph_hooks ();
2172 if (!flag_ipa_cp)
2173 ipa_prop_read_jump_functions ();
2175 function_insertion_hook_holder =
2176 cgraph_add_function_insertion_hook (&add_new_function, NULL);
2179 /* Write inline summary for the nodes in SET.
2180 Jump functions are shared among ipa-cp and inliner, so when ipa-cp is
2181 active, we don't need to write them twice. */
2183 static void
2184 inline_write_summary (cgraph_node_set set,
2185 varpool_node_set vset ATTRIBUTE_UNUSED)
2187 if (flag_indirect_inlining && !flag_ipa_cp)
2188 ipa_prop_write_jump_functions (set);
2191 /* When to run IPA inlining. Inlining of always-inline functions
2192 happens during early inlining. */
2194 static bool
2195 gate_cgraph_decide_inlining (void)
2197 /* ??? We'd like to skip this pass if not optimizing or not inlining, as
2198    all always-inline functions have been processed by early
2199    inlining already.  But doing so breaks EH with C++, because
2200    we need to run fixup_cfg unconditionally, even at -O0.
2201    So leave the pass enabled unconditionally for now.  */
2202 return true;
2205 struct ipa_opt_pass_d pass_ipa_inline =
2208 IPA_PASS,
2209 "inline", /* name */
2210 gate_cgraph_decide_inlining, /* gate */
2211 cgraph_decide_inlining, /* execute */
2212 NULL, /* sub */
2213 NULL, /* next */
2214 0, /* static_pass_number */
2215 TV_INLINE_HEURISTICS, /* tv_id */
2216 0, /* properties_required */
2217 0, /* properties_provided */
2218 0, /* properties_destroyed */
2219 TODO_remove_functions, /* todo_flags_start */
2220 TODO_dump_cgraph | TODO_dump_func
2221 | TODO_remove_functions | TODO_ggc_collect /* todo_flags_finish */
2223 inline_generate_summary, /* generate_summary */
2224 inline_write_summary, /* write_summary */
2225 inline_read_summary, /* read_summary */
2226 NULL, /* write_optimization_summary */
2227 NULL, /* read_optimization_summary */
2228 NULL, /* stmt_fixup */
2229 0, /* TODOs */
2230 inline_transform, /* function_transform */
2231 NULL, /* variable_transform */
2235 #include "gt-ipa-inline.h"