PR c++/83720 - ICE with lambda and LTO.
[official-gcc.git] / gcc / ipa-inline-analysis.c
blobbcdca5b13aa4d37f87a18352891e7e12ae8cc29f
1 /* Analysis used by inlining decision heuristics.
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "alloc-pool.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "tree-streamer.h"
31 #include "cgraph.h"
32 #include "diagnostic.h"
33 #include "fold-const.h"
34 #include "print-tree.h"
35 #include "tree-inline.h"
36 #include "gimple-pretty-print.h"
37 #include "params.h"
38 #include "cfganal.h"
39 #include "gimple-iterator.h"
40 #include "tree-cfg.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-ssa-loop.h"
43 #include "symbol-summary.h"
44 #include "ipa-prop.h"
45 #include "ipa-fnsummary.h"
46 #include "ipa-inline.h"
47 #include "cfgloop.h"
48 #include "tree-scalar-evolution.h"
49 #include "ipa-utils.h"
50 #include "cfgexpand.h"
51 #include "gimplify.h"
53 /* Cached node/edge growths. */
54 vec<edge_growth_cache_entry> edge_growth_cache;
55 static struct cgraph_edge_hook_list *edge_removal_hook_holder;
58 /* Give initial reasons why inlining would fail on EDGE. This gets either
59 nullified or usually overwritten by more precise reasons later. */
61 void
62 initialize_inline_failed (struct cgraph_edge *e)
64 struct cgraph_node *callee = e->callee;
66 if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
67 && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
69 else if (e->indirect_unknown_callee)
70 e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
71 else if (!callee->definition)
72 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
73 else if (callee->local.redefined_extern_inline)
74 e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
75 else
76 e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
77 gcc_checking_assert (!e->call_stmt_cannot_inline_p
78 || cgraph_inline_failed_type (e->inline_failed)
79 == CIF_FINAL_ERROR);
/* Callback keeping the edge growth cache consistent when an edge is
   removed from the call graph: drop any cached entry for EDGE.  */

static void
inline_edge_removal_hook (struct cgraph_edge *edge, void *ATTRIBUTE_UNUSED)
{
  reset_edge_growth_cache (edge);
}
93 /* Initialize growth caches. */
95 void
96 initialize_growth_caches (void)
98 if (!edge_removal_hook_holder)
99 edge_removal_hook_holder =
100 symtab->add_edge_removal_hook (&inline_edge_removal_hook, NULL);
101 if (symtab->edges_max_uid)
102 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
106 /* Free growth caches. */
108 void
109 free_growth_caches (void)
111 if (edge_removal_hook_holder)
113 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
114 edge_removal_hook_holder = NULL;
116 edge_growth_cache.release ();
/* Return hints derived from EDGE.  */
122 simple_edge_hints (struct cgraph_edge *edge)
124 int hints = 0;
125 struct cgraph_node *to = (edge->caller->global.inlined_to
126 ? edge->caller->global.inlined_to : edge->caller);
127 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
128 if (ipa_fn_summaries->get (to)->scc_no
129 && ipa_fn_summaries->get (to)->scc_no
130 == ipa_fn_summaries->get (callee)->scc_no
131 && !edge->recursive_p ())
132 hints |= INLINE_HINT_same_scc;
134 if (callee->lto_file_data && edge->caller->lto_file_data
135 && edge->caller->lto_file_data != callee->lto_file_data
136 && !callee->merged_comdat && !callee->icf_merged)
137 hints |= INLINE_HINT_cross_module;
139 return hints;
142 /* Estimate the time cost for the caller when inlining EDGE.
143 Only to be called via estimate_edge_time, that handles the
144 caching mechanism.
146 When caching, also update the cache entry. Compute both time and
147 size, since we always need both metrics eventually. */
149 sreal
150 do_estimate_edge_time (struct cgraph_edge *edge)
152 sreal time, nonspec_time;
153 int size;
154 ipa_hints hints;
155 struct cgraph_node *callee;
156 clause_t clause, nonspec_clause;
157 vec<tree> known_vals;
158 vec<ipa_polymorphic_call_context> known_contexts;
159 vec<ipa_agg_jump_function_p> known_aggs;
160 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
161 int min_size;
163 callee = edge->callee->ultimate_alias_target ();
165 gcc_checking_assert (edge->inline_failed);
166 evaluate_properties_for_edge (edge, true,
167 &clause, &nonspec_clause, &known_vals,
168 &known_contexts, &known_aggs);
169 estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
170 known_contexts, known_aggs, &size, &min_size,
171 &time, &nonspec_time, &hints, es->param);
173 /* When we have profile feedback, we can quite safely identify hot
174 edges and for those we disable size limits. Don't do that when
175 probability that caller will call the callee is low however, since it
176 may hurt optimization of the caller's hot path. */
177 if (edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
178 && (edge->count.ipa ().apply_scale (2, 1)
179 > (edge->caller->global.inlined_to
180 ? edge->caller->global.inlined_to->count.ipa ()
181 : edge->caller->count.ipa ())))
182 hints |= INLINE_HINT_known_hot;
184 known_vals.release ();
185 known_contexts.release ();
186 known_aggs.release ();
187 gcc_checking_assert (size >= 0);
188 gcc_checking_assert (time >= 0);
190 /* When caching, update the cache entry. */
191 if (edge_growth_cache.exists ())
193 ipa_fn_summaries->get (edge->callee)->min_size = min_size;
194 if ((int) edge_growth_cache.length () <= edge->uid)
195 edge_growth_cache.safe_grow_cleared (symtab->edges_max_uid);
196 edge_growth_cache[edge->uid].time = time;
197 edge_growth_cache[edge->uid].nonspec_time = nonspec_time;
199 edge_growth_cache[edge->uid].size = size + (size >= 0);
200 hints |= simple_edge_hints (edge);
201 edge_growth_cache[edge->uid].hints = hints + 1;
203 return time;
207 /* Return estimated callee growth after inlining EDGE.
208 Only to be called via estimate_edge_size. */
211 do_estimate_edge_size (struct cgraph_edge *edge)
213 int size;
214 struct cgraph_node *callee;
215 clause_t clause, nonspec_clause;
216 vec<tree> known_vals;
217 vec<ipa_polymorphic_call_context> known_contexts;
218 vec<ipa_agg_jump_function_p> known_aggs;
220 /* When we do caching, use do_estimate_edge_time to populate the entry. */
222 if (edge_growth_cache.exists ())
224 do_estimate_edge_time (edge);
225 size = edge_growth_cache[edge->uid].size;
226 gcc_checking_assert (size);
227 return size - (size > 0);
230 callee = edge->callee->ultimate_alias_target ();
232 /* Early inliner runs without caching, go ahead and do the dirty work. */
233 gcc_checking_assert (edge->inline_failed);
234 evaluate_properties_for_edge (edge, true,
235 &clause, &nonspec_clause,
236 &known_vals, &known_contexts,
237 &known_aggs);
238 estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
239 known_contexts, known_aggs, &size, NULL, NULL,
240 NULL, NULL, vNULL);
241 known_vals.release ();
242 known_contexts.release ();
243 known_aggs.release ();
244 return size;
248 /* Estimate the growth of the caller when inlining EDGE.
249 Only to be called via estimate_edge_size. */
251 ipa_hints
252 do_estimate_edge_hints (struct cgraph_edge *edge)
254 ipa_hints hints;
255 struct cgraph_node *callee;
256 clause_t clause, nonspec_clause;
257 vec<tree> known_vals;
258 vec<ipa_polymorphic_call_context> known_contexts;
259 vec<ipa_agg_jump_function_p> known_aggs;
261 /* When we do caching, use do_estimate_edge_time to populate the entry. */
263 if (edge_growth_cache.exists ())
265 do_estimate_edge_time (edge);
266 hints = edge_growth_cache[edge->uid].hints;
267 gcc_checking_assert (hints);
268 return hints - 1;
271 callee = edge->callee->ultimate_alias_target ();
273 /* Early inliner runs without caching, go ahead and do the dirty work. */
274 gcc_checking_assert (edge->inline_failed);
275 evaluate_properties_for_edge (edge, true,
276 &clause, &nonspec_clause,
277 &known_vals, &known_contexts,
278 &known_aggs);
279 estimate_node_size_and_time (callee, clause, nonspec_clause, known_vals,
280 known_contexts, known_aggs, NULL, NULL,
281 NULL, NULL, &hints, vNULL);
282 known_vals.release ();
283 known_contexts.release ();
284 known_aggs.release ();
285 hints |= simple_edge_hints (edge);
286 return hints;
289 /* Estimate the size of NODE after inlining EDGE which should be an
290 edge to either NODE or a call inlined into NODE. */
293 estimate_size_after_inlining (struct cgraph_node *node,
294 struct cgraph_edge *edge)
296 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
297 if (!es->predicate || *es->predicate != false)
299 int size = ipa_fn_summaries->get (node)->size + estimate_edge_growth (edge);
300 gcc_assert (size >= 0);
301 return size;
303 return ipa_fn_summaries->get (node)->size;
307 struct growth_data
309 struct cgraph_node *node;
310 bool self_recursive;
311 bool uninlinable;
312 int growth;
316 /* Worker for do_estimate_growth. Collect growth for all callers. */
318 static bool
319 do_estimate_growth_1 (struct cgraph_node *node, void *data)
321 struct cgraph_edge *e;
322 struct growth_data *d = (struct growth_data *) data;
324 for (e = node->callers; e; e = e->next_caller)
326 gcc_checking_assert (e->inline_failed);
328 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
329 || !opt_for_fn (e->caller->decl, optimize))
331 d->uninlinable = true;
332 continue;
335 if (e->recursive_p ())
337 d->self_recursive = true;
338 continue;
340 d->growth += estimate_edge_growth (e);
342 return false;
346 /* Estimate the growth caused by inlining NODE into all callees. */
349 estimate_growth (struct cgraph_node *node)
351 struct growth_data d = { node, false, false, 0 };
352 struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
354 node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
356 /* For self recursive functions the growth estimation really should be
357 infinity. We don't want to return very large values because the growth
358 plays various roles in badness computation fractions. Be sure to not
359 return zero or negative growths. */
360 if (d.self_recursive)
361 d.growth = d.growth < info->size ? info->size : d.growth;
362 else if (DECL_EXTERNAL (node->decl) || d.uninlinable)
364 else
366 if (node->will_be_removed_from_program_if_no_direct_calls_p ())
367 d.growth -= info->size;
368 /* COMDAT functions are very often not shared across multiple units
369 since they come from various template instantiations.
370 Take this into account. */
371 else if (DECL_COMDAT (node->decl)
372 && node->can_remove_if_no_direct_calls_p ())
373 d.growth -= (info->size
374 * (100 - PARAM_VALUE (PARAM_COMDAT_SHARING_PROBABILITY))
375 + 50) / 100;
378 return d.growth;
381 /* Verify if there are fewer than MAX_CALLERS. */
383 static bool
384 check_callers (cgraph_node *node, int *max_callers)
386 ipa_ref *ref;
388 if (!node->can_remove_if_no_direct_calls_and_refs_p ())
389 return true;
391 for (cgraph_edge *e = node->callers; e; e = e->next_caller)
393 (*max_callers)--;
394 if (!*max_callers
395 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
396 return true;
399 FOR_EACH_ALIAS (node, ref)
400 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), max_callers))
401 return true;
403 return false;
407 /* Make cheap estimation if growth of NODE is likely positive knowing
408 EDGE_GROWTH of one particular edge.
409 We assume that most of other edges will have similar growth
410 and skip computation if there are too many callers. */
412 bool
413 growth_likely_positive (struct cgraph_node *node,
414 int edge_growth)
416 int max_callers;
417 struct cgraph_edge *e;
418 gcc_checking_assert (edge_growth > 0);
420 /* First quickly check if NODE is removable at all. */
421 if (DECL_EXTERNAL (node->decl))
422 return true;
423 if (!node->can_remove_if_no_direct_calls_and_refs_p ()
424 || node->address_taken)
425 return true;
427 max_callers = ipa_fn_summaries->get (node)->size * 4 / edge_growth + 2;
429 for (e = node->callers; e; e = e->next_caller)
431 max_callers--;
432 if (!max_callers
433 || cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
434 return true;
437 ipa_ref *ref;
438 FOR_EACH_ALIAS (node, ref)
439 if (check_callers (dyn_cast <cgraph_node *> (ref->referring), &max_callers))
440 return true;
442 /* Unlike for functions called once, we play unsafe with
443 COMDATs. We can allow that since we know functions
444 in consideration are small (and thus risk is small) and
445 moreover grow estimates already accounts that COMDAT
446 functions may or may not disappear when eliminated from
447 current unit. With good probability making aggressive
448 choice in all units is going to make overall program
449 smaller. */
450 if (DECL_COMDAT (node->decl))
452 if (!node->can_remove_if_no_direct_calls_p ())
453 return true;
455 else if (!node->will_be_removed_from_program_if_no_direct_calls_p ())
456 return true;
458 return estimate_growth (node) > 0;