* config/i386/i386.h (TARGET_SUPPORTS_WIDE_INT): New define.
[official-gcc.git] / gcc / ipa-profile.c
blob96fb8102fc8ca5f7f24db2fcad8a0434ec01b0f9
1 /* Basic IPA optimizations based on profile.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* ipa-profile pass implements the following analysis propagating profile
21 inter-procedurally.
23 - Count histogram construction. This is a histogram analyzing how much
24 time is spent executing statements with a given execution count read
25 from profile feedback. This histogram is complete only with LTO,
26 otherwise it contains information only about the current unit.
28 Similar histogram is also estimated by coverage runtime. This histogram
29 is not dependent on LTO, but it suffers from various defects; first
30 gcov runtime is not weighting individual basic block by estimated execution
31 time and second the merging of multiple runs makes assumption that the
32 histogram distribution did not change. Consequently histogram constructed
33 here may be more precise.
35 The information is used to set hot/cold thresholds.
36 - Next speculative indirect call resolution is performed: the local
37 profile pass assigns profile-id to each function and provide us with a
38 histogram specifying the most common target. We look up the callgraph
39 node corresponding to the target and produce a speculative call.
41 This call may or may not survive through IPA optimization based on decision
42 of inliner.
43 - Finally we propagate the following flags: unlikely executed, executed
44 once, executed at startup and executed at exit. These flags are used to
45 control code size/performance threshold and code placement (by producing
46 .text.unlikely/.text.hot/.text.startup/.text.exit subsections). */
47 #include "config.h"
48 #include "system.h"
49 #include "coretypes.h"
50 #include "tm.h"
51 #include "hash-set.h"
52 #include "machmode.h"
53 #include "vec.h"
54 #include "double-int.h"
55 #include "input.h"
56 #include "alias.h"
57 #include "symtab.h"
58 #include "wide-int.h"
59 #include "inchash.h"
60 #include "tree.h"
61 #include "fold-const.h"
62 #include "predict.h"
63 #include "dominance.h"
64 #include "cfg.h"
65 #include "basic-block.h"
66 #include "hash-map.h"
67 #include "is-a.h"
68 #include "plugin-api.h"
69 #include "hard-reg-set.h"
70 #include "input.h"
71 #include "function.h"
72 #include "ipa-ref.h"
73 #include "cgraph.h"
74 #include "tree-pass.h"
75 #include "tree-ssa-alias.h"
76 #include "internal-fn.h"
77 #include "gimple-expr.h"
78 #include "gimple.h"
79 #include "gimple-iterator.h"
80 #include "flags.h"
81 #include "target.h"
82 #include "tree-iterator.h"
83 #include "ipa-utils.h"
84 #include "profile.h"
85 #include "params.h"
86 #include "value-prof.h"
87 #include "alloc-pool.h"
88 #include "tree-inline.h"
89 #include "lto-streamer.h"
90 #include "data-streamer.h"
91 #include "symbol-summary.h"
92 #include "ipa-prop.h"
93 #include "ipa-inline.h"
95 /* Entry in the histogram. */
97 struct histogram_entry
99 gcov_type count;
100 int time;
101 int size;
104 /* Histogram of profile values.
105 The histogram is represented as an ordered vector of entries allocated via
106 histogram_pool. During construction a separate hashtable is kept to lookup
107 duplicate entries. */
109 vec<histogram_entry *> histogram;
110 static alloc_pool histogram_pool;
112 /* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR. */
114 struct histogram_hash : typed_noop_remove <histogram_entry>
116 typedef histogram_entry *value_type;
117 typedef histogram_entry *compare_type;
118 static inline hashval_t hash (const histogram_entry *);
119 static inline int equal (const histogram_entry *, const histogram_entry *);
122 inline hashval_t
123 histogram_hash::hash (const histogram_entry *val)
125 return val->count;
128 inline int
129 histogram_hash::equal (const histogram_entry *val, const histogram_entry *val2)
131 return val->count == val2->count;
134 /* Account TIME and SIZE executed COUNT times into HISTOGRAM.
135 HASHTABLE is the on-side hash kept to avoid duplicates. */
137 static void
138 account_time_size (hash_table<histogram_hash> *hashtable,
139 vec<histogram_entry *> &histogram,
140 gcov_type count, int time, int size)
142 histogram_entry key = {count, 0, 0};
143 histogram_entry **val = hashtable->find_slot (&key, INSERT);
145 if (!*val)
147 *val = (histogram_entry *) pool_alloc (histogram_pool);
148 **val = key;
149 histogram.safe_push (*val);
151 (*val)->time += time;
152 (*val)->size += size;
156 cmp_counts (const void *v1, const void *v2)
158 const histogram_entry *h1 = *(const histogram_entry * const *)v1;
159 const histogram_entry *h2 = *(const histogram_entry * const *)v2;
160 if (h1->count < h2->count)
161 return 1;
162 if (h1->count > h2->count)
163 return -1;
164 return 0;
167 /* Dump HISTOGRAM to FILE. */
169 static void
170 dump_histogram (FILE *file, vec<histogram_entry *> histogram)
172 unsigned int i;
173 gcov_type overall_time = 0, cumulated_time = 0, cumulated_size = 0, overall_size = 0;
175 fprintf (dump_file, "Histogram:\n");
176 for (i = 0; i < histogram.length (); i++)
178 overall_time += histogram[i]->count * histogram[i]->time;
179 overall_size += histogram[i]->size;
181 if (!overall_time)
182 overall_time = 1;
183 if (!overall_size)
184 overall_size = 1;
185 for (i = 0; i < histogram.length (); i++)
187 cumulated_time += histogram[i]->count * histogram[i]->time;
188 cumulated_size += histogram[i]->size;
189 fprintf (file, " %"PRId64": time:%i (%2.2f) size:%i (%2.2f)\n",
190 (int64_t) histogram[i]->count,
191 histogram[i]->time,
192 cumulated_time * 100.0 / overall_time,
193 histogram[i]->size,
194 cumulated_size * 100.0 / overall_size);
198 /* Collect histogram from CFG profiles. */
200 static void
201 ipa_profile_generate_summary (void)
203 struct cgraph_node *node;
204 gimple_stmt_iterator gsi;
205 basic_block bb;
207 hash_table<histogram_hash> hashtable (10);
208 histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
209 10);
211 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
212 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
214 int time = 0;
215 int size = 0;
216 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
218 gimple stmt = gsi_stmt (gsi);
219 if (gimple_code (stmt) == GIMPLE_CALL
220 && !gimple_call_fndecl (stmt))
222 histogram_value h;
223 h = gimple_histogram_value_of_type
224 (DECL_STRUCT_FUNCTION (node->decl),
225 stmt, HIST_TYPE_INDIR_CALL);
226 /* No need to do sanity check: gimple_ic_transform already
227 takes away bad histograms. */
228 if (h)
230 /* counter 0 is target, counter 1 is number of execution we called target,
231 counter 2 is total number of executions. */
232 if (h->hvalue.counters[2])
234 struct cgraph_edge * e = node->get_edge (stmt);
235 if (e && !e->indirect_unknown_callee)
236 continue;
237 e->indirect_info->common_target_id
238 = h->hvalue.counters [0];
239 e->indirect_info->common_target_probability
240 = GCOV_COMPUTE_SCALE (h->hvalue.counters [1], h->hvalue.counters [2]);
241 if (e->indirect_info->common_target_probability > REG_BR_PROB_BASE)
243 if (dump_file)
244 fprintf (dump_file, "Probability capped to 1\n");
245 e->indirect_info->common_target_probability = REG_BR_PROB_BASE;
248 gimple_remove_histogram_value (DECL_STRUCT_FUNCTION (node->decl),
249 stmt, h);
252 time += estimate_num_insns (stmt, &eni_time_weights);
253 size += estimate_num_insns (stmt, &eni_size_weights);
255 account_time_size (&hashtable, histogram, bb->count, time, size);
257 histogram.qsort (cmp_counts);
260 /* Serialize the ipa info for lto. */
262 static void
263 ipa_profile_write_summary (void)
265 struct lto_simple_output_block *ob
266 = lto_create_simple_output_block (LTO_section_ipa_profile);
267 unsigned int i;
269 streamer_write_uhwi_stream (ob->main_stream, histogram.length ());
270 for (i = 0; i < histogram.length (); i++)
272 streamer_write_gcov_count_stream (ob->main_stream, histogram[i]->count);
273 streamer_write_uhwi_stream (ob->main_stream, histogram[i]->time);
274 streamer_write_uhwi_stream (ob->main_stream, histogram[i]->size);
276 lto_destroy_simple_output_block (ob);
279 /* Deserialize the ipa info for lto. */
281 static void
282 ipa_profile_read_summary (void)
284 struct lto_file_decl_data ** file_data_vec
285 = lto_get_file_decl_data ();
286 struct lto_file_decl_data * file_data;
287 int j = 0;
289 hash_table<histogram_hash> hashtable (10);
290 histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
291 10);
293 while ((file_data = file_data_vec[j++]))
295 const char *data;
296 size_t len;
297 struct lto_input_block *ib
298 = lto_create_simple_input_block (file_data,
299 LTO_section_ipa_profile,
300 &data, &len);
301 if (ib)
303 unsigned int num = streamer_read_uhwi (ib);
304 unsigned int n;
305 for (n = 0; n < num; n++)
307 gcov_type count = streamer_read_gcov_count (ib);
308 int time = streamer_read_uhwi (ib);
309 int size = streamer_read_uhwi (ib);
310 account_time_size (&hashtable, histogram,
311 count, time, size);
313 lto_destroy_simple_input_block (file_data,
314 LTO_section_ipa_profile,
315 ib, data, len);
318 histogram.qsort (cmp_counts);
321 /* Data used by ipa_propagate_frequency. */
323 struct ipa_propagate_frequency_data
325 cgraph_node *function_symbol;
326 bool maybe_unlikely_executed;
327 bool maybe_executed_once;
328 bool only_called_at_startup;
329 bool only_called_at_exit;
332 /* Worker for ipa_propagate_frequency: callback invoked for NODE and each
    of its aliases.  DATA points to a struct ipa_propagate_frequency_data;
    the loop below clears whichever of its four flags the callers of NODE
    disprove.  Returns nonzero when the caller walk stopped early because
    every flag was already settled — NOTE(review): presumably this stops the
    call_for_symbol_and_aliases iteration; verify against the cgraph API.  */
334 static bool
335 ipa_propagate_frequency_1 (struct cgraph_node *node, void *data)
337 struct ipa_propagate_frequency_data *d;
338 struct cgraph_edge *edge;
340 d = (struct ipa_propagate_frequency_data *)data;
    /* Walk callers only while at least one flag is still undecided.  */
341 for (edge = node->callers;
342 edge && (d->maybe_unlikely_executed || d->maybe_executed_once
343 || d->only_called_at_startup || d->only_called_at_exit);
344 edge = edge->next_caller)
    /* Self-recursive edges tell us nothing about startup/exit placement.  */
346 if (edge->caller != d->function_symbol)
348 d->only_called_at_startup &= edge->caller->only_called_at_startup;
349 /* It makes sense to put main() together with the static constructors.
350 It will be executed for sure, but rest of functions called from
351 main are definitely not at startup only. */
352 if (MAIN_NAME_P (DECL_NAME (edge->caller->decl)))
353 d->only_called_at_startup = 0;
354 d->only_called_at_exit &= edge->caller->only_called_at_exit;
357 /* When profile feedback is available, do not try to propagate too hard;
358 counts are already good guide on function frequencies and roundoff
359 errors can make us to push function into unlikely section even when
360 it is executed by the train run. Transfer the function only if all
361 callers are unlikely executed. */
362 if (profile_info
363 && opt_for_fn (d->function_symbol->decl, flag_branch_probabilities)
364 /* Thunks are not profiled. This is more or less implementation
365 bug. */
366 && !d->function_symbol->thunk.thunk_p
367 && (edge->caller->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED
368 || (edge->caller->global.inlined_to
369 && edge->caller->global.inlined_to->frequency
370 != NODE_FREQUENCY_UNLIKELY_EXECUTED)))
371 d->maybe_unlikely_executed = false;
    /* Never-executed edges carry no frequency information.  */
372 if (!edge->frequency)
373 continue;
    /* Fold the caller's own frequency class into the flags.  */
374 switch (edge->caller->frequency)
376 case NODE_FREQUENCY_UNLIKELY_EXECUTED:
377 break;
378 case NODE_FREQUENCY_EXECUTED_ONCE:
379 if (dump_file && (dump_flags & TDF_DETAILS))
380 fprintf (dump_file, " Called by %s that is executed once\n",
381 edge->caller->name ());
382 d->maybe_unlikely_executed = false;
    /* A call inside a loop may run many times even in a once-executed
       caller.  */
383 if (inline_edge_summary (edge)->loop_depth)
385 d->maybe_executed_once = false;
386 if (dump_file && (dump_flags & TDF_DETAILS))
387 fprintf (dump_file, " Called in loop\n")
389 break;
390 case NODE_FREQUENCY_HOT:
391 case NODE_FREQUENCY_NORMAL:
392 if (dump_file && (dump_flags & TDF_DETAILS))
393 fprintf (dump_file, " Called by %s that is normal or hot\n",
394 edge->caller->name ());
395 d->maybe_unlikely_executed = false;
396 d->maybe_executed_once = false;
397 break;
    /* Nonzero EDGE here means the loop above bailed out early.  */
400 return edge != NULL;
403 /* Return ture if NODE contains hot calls. */
405 bool
406 contains_hot_call_p (struct cgraph_node *node)
408 struct cgraph_edge *e;
409 for (e = node->callees; e; e = e->next_callee)
410 if (e->maybe_hot_p ())
411 return true;
412 else if (!e->inline_failed
413 && contains_hot_call_p (e->callee))
414 return true;
415 for (e = node->indirect_calls; e; e = e->next_callee)
416 if (e->maybe_hot_p ())
417 return true;
418 return false;
421 /* See if the frequency of NODE can be updated based on frequencies of its
422 callers.  Returns true when any of NODE's placement flags or its
    frequency class changed.  */
423 bool
424 ipa_propagate_frequency (struct cgraph_node *node)
    /* All four propagation flags start optimistic (true) and get narrowed
       by ipa_propagate_frequency_1 over all callers of NODE.  */
426 struct ipa_propagate_frequency_data d = {node, true, true, true, true};
427 bool changed = false;
429 /* We can not propagate anything useful about externally visible functions
430 nor about virtuals. */
431 if (!node->local.local
432 || node->alias
433 || (opt_for_fn (node->decl, flag_devirtualize)
434 && DECL_VIRTUAL_P (node->decl)))
435 return false;
436 gcc_assert (node->analyzed);
437 if (dump_file && (dump_flags & TDF_DETAILS))
438 fprintf (dump_file, "Processing frequency %s\n", node->name ());
    /* Examine callers of NODE and of all its aliases.  */
440 node->call_for_symbol_and_aliases (ipa_propagate_frequency_1, &d,
441 true);
    /* Promote startup/exit placement only when the evidence is unambiguous
       (not both at once) and the flag is not already set.  */
443 if ((d.only_called_at_startup && !d.only_called_at_exit)
444 && !node->only_called_at_startup)
446 node->only_called_at_startup = true;
447 if (dump_file)
448 fprintf (dump_file, "Node %s promoted to only called at startup.\n",
449 node->name ());
450 changed = true;
452 if ((d.only_called_at_exit && !d.only_called_at_startup)
453 && !node->only_called_at_exit)
455 node->only_called_at_exit = true;
456 if (dump_file)
457 fprintf (dump_file, "Node %s promoted to only called at exit.\n",
458 node->name ());
459 changed = true;
462 /* With profile we can decide on hot/normal based on count. */
463 if (node->count)
465 bool hot = false;
466 if (node->count >= get_hot_bb_threshold ())
467 hot = true;
468 if (!hot)
469 hot |= contains_hot_call_p (node);
470 if (hot)
472 if (node->frequency != NODE_FREQUENCY_HOT)
474 if (dump_file)
475 fprintf (dump_file, "Node %s promoted to hot.\n",
476 node->name ());
477 node->frequency = NODE_FREQUENCY_HOT;
478 return true;
480 return false;
482 else if (node->frequency == NODE_FREQUENCY_HOT)
484 if (dump_file)
485 fprintf (dump_file, "Node %s reduced to normal.\n",
486 node->name ());
487 node->frequency = NODE_FREQUENCY_NORMAL;
488 changed = true;
491 /* These come either from profile or user hints; never update them. */
492 if (node->frequency == NODE_FREQUENCY_HOT
493 || node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
494 return changed;
    /* Otherwise lower the frequency class as far as the caller evidence
       allows: unlikely beats executed-once.  */
495 if (d.maybe_unlikely_executed)
497 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
498 if (dump_file)
499 fprintf (dump_file, "Node %s promoted to unlikely executed.\n",
500 node->name ());
501 changed = true;
503 else if (d.maybe_executed_once && node->frequency != NODE_FREQUENCY_EXECUTED_ONCE)
505 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
506 if (dump_file)
507 fprintf (dump_file, "Node %s promoted to executed once.\n",
508 node->name ());
509 changed = true;
511 return changed;
514 /* Simple ipa profile pass propagating frequencies across the callgraph.
    Three stages: (1) derive the hot-BB count threshold from the global
    histogram, (2) turn profiled common indirect-call targets into
    speculative direct calls, (3) iterate ipa_propagate_frequency to a
    fixpoint in reverse postorder.  Always returns 0 (no TODO flags).  */
516 static unsigned int
517 ipa_profile (void)
519 struct cgraph_node **order;
520 struct cgraph_edge *e;
521 int order_pos;
522 bool something_changed = false;
523 int i;
524 gcov_type overall_time = 0, cutoff = 0, cumulated = 0, overall_size = 0;
525 struct cgraph_node *n,*n2;
    /* Statistics for the final dump.  */
526 int nindirect = 0, ncommon = 0, nunknown = 0, nuseless = 0, nconverted = 0;
527 int nmismatch = 0, nimpossible = 0;
528 bool node_map_initialized = false;
530 if (dump_file)
531 dump_histogram (dump_file, histogram);
    /* Totals over the whole histogram; weights time by execution count.  */
532 for (i = 0; i < (int)histogram.length (); i++)
534 overall_time += histogram[i]->count * histogram[i]->time;
535 overall_size += histogram[i]->size;
    /* Stage 1: pick THRESHOLD so that the hottest counts cover
       HOT_BB_COUNT_WS_PERMILLE of overall weighted time.  */
537 if (overall_time)
539 gcov_type threshold;
541 gcc_assert (overall_size);
542 if (dump_file)
544 gcov_type min, cumulated_time = 0, cumulated_size = 0;
546 fprintf (dump_file, "Overall time: %"PRId64"\n",
547 (int64_t)overall_time);
548 min = get_hot_bb_threshold ();
549 for (i = 0; i < (int)histogram.length () && histogram[i]->count >= min;
550 i++)
552 cumulated_time += histogram[i]->count * histogram[i]->time;
553 cumulated_size += histogram[i]->size;
555 fprintf (dump_file, "GCOV min count: %"PRId64
556 " Time:%3.2f%% Size:%3.2f%%\n",
557 (int64_t)min,
558 cumulated_time * 100.0 / overall_time,
559 cumulated_size * 100.0 / overall_size);
    /* +500/1000 rounds the permille fraction to nearest.  */
561 cutoff = (overall_time * PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE) + 500) / 1000;
562 threshold = 0;
563 for (i = 0; cumulated < cutoff; i++)
565 cumulated += histogram[i]->count * histogram[i]->time;
566 threshold = histogram[i]->count;
568 if (!threshold)
569 threshold = 1;
570 if (dump_file)
572 gcov_type cumulated_time = 0, cumulated_size = 0;
574 for (i = 0;
575 i < (int)histogram.length () && histogram[i]->count >= threshold;
576 i++)
578 cumulated_time += histogram[i]->count * histogram[i]->time;
579 cumulated_size += histogram[i]->size;
581 fprintf (dump_file, "Determined min count: %"PRId64
582 " Time:%3.2f%% Size:%3.2f%%\n",
583 (int64_t)threshold,
584 cumulated_time * 100.0 / overall_time,
585 cumulated_size * 100.0 / overall_size);
    /* Only ever raise the threshold, except in LTO where the histogram is
       complete and therefore authoritative.  */
587 if (threshold > get_hot_bb_threshold ()
588 || in_lto_p)
590 if (dump_file)
591 fprintf (dump_file, "Threshold updated.\n");
592 set_hot_bb_threshold (threshold);
    /* The histogram is no longer needed; release its storage.  */
595 histogram.release ();
596 free_alloc_pool (histogram_pool);
598 /* Stage 2: produce speculative calls: we saved common target from profiling into
599 e->common_target_id. Now, at link time, we can look up corresponding
600 function node and produce speculative call. */
602 FOR_EACH_DEFINED_FUNCTION (n)
604 bool update = false;
606 if (!opt_for_fn (n->decl, flag_ipa_profile))
607 continue;
609 for (e = n->indirect_calls; e; e = e->next_callee)
611 if (n->count)
612 nindirect++;
613 if (e->indirect_info->common_target_id)
    /* Lazily build the profile-id -> node map on first use.  */
615 if (!node_map_initialized)
616 init_node_map (false);
617 node_map_initialized = true;
618 ncommon++;
619 n2 = find_func_by_profile_id (e->indirect_info->common_target_id);
620 if (n2)
622 if (dump_file)
624 fprintf (dump_file, "Indirect call -> direct call from"
625 " other module %s/%i => %s/%i, prob %3.2f\n",
626 xstrdup_for_dump (n->name ()), n->order,
627 xstrdup_for_dump (n2->name ()), n2->order,
628 e->indirect_info->common_target_probability
629 / (float)REG_BR_PROB_BASE);
    /* Reject speculation when it is unlikely to pay off: low probability,
       cold call site, discardable interposable target, argument-count
       mismatch, or target impossible for this polymorphic call.  */
631 if (e->indirect_info->common_target_probability
632 < REG_BR_PROB_BASE / 2)
634 nuseless++;
635 if (dump_file)
636 fprintf (dump_file,
637 "Not speculating: probability is too low.\n");
639 else if (!e->maybe_hot_p ())
641 nuseless++;
642 if (dump_file)
643 fprintf (dump_file,
644 "Not speculating: call is cold.\n");
646 else if (n2->get_availability () <= AVAIL_INTERPOSABLE
647 && n2->can_be_discarded_p ())
649 nuseless++;
650 if (dump_file)
651 fprintf (dump_file,
652 "Not speculating: target is overwritable "
653 "and can be discarded.\n");
655 else if (ipa_node_params_sum && ipa_edge_args_vector
656 && !IPA_NODE_REF (n2)->descriptors.is_empty ()
657 && ipa_get_param_count (IPA_NODE_REF (n2))
658 != ipa_get_cs_argument_count (IPA_EDGE_REF (e))
659 && (ipa_get_param_count (IPA_NODE_REF (n2))
660 >= ipa_get_cs_argument_count (IPA_EDGE_REF (e))
661 || !stdarg_p (TREE_TYPE (n2->decl))))
663 nmismatch++;
664 if (dump_file)
665 fprintf (dump_file,
666 "Not speculating: "
667 "parameter count mistmatch\n");
669 else if (e->indirect_info->polymorphic
670 && !opt_for_fn (n->decl, flag_devirtualize)
671 && !possible_polymorphic_call_target_p (e, n2))
673 nimpossible++;
674 if (dump_file)
675 fprintf (dump_file,
676 "Not speculating: "
677 "function is not in the polymorphic "
678 "call target list\n");
680 else
682 /* Target may be overwritable, but profile says that
683 control flow goes to this particular implementation
684 of N2. Speculate on the local alias to allow inlining.  */
686 if (!n2->can_be_discarded_p ())
688 cgraph_node *alias;
689 alias = dyn_cast<cgraph_node *> (n2->noninterposable_alias ());
690 if (alias)
691 n2 = alias;
693 nconverted++;
    /* Scale the edge's count/frequency by the measured probability of
       reaching this particular target.  */
694 e->make_speculative
695 (n2,
696 apply_scale (e->count,
697 e->indirect_info->common_target_probability),
698 apply_scale (e->frequency,
699 e->indirect_info->common_target_probability));
700 update = true;
703 else
705 if (dump_file)
706 fprintf (dump_file, "Function with profile-id %i not found.\n",
707 e->indirect_info->common_target_id);
708 nunknown++;
712 if (update)
713 inline_update_overall_summary (n);
714 if (node_map_initialized)
716 del_node_map ();
717 if (dump_file && nindirect)
718 fprintf (dump_file,
719 "%i indirect calls trained.\n"
720 "%i (%3.2f%%) have common target.\n"
721 "%i (%3.2f%%) targets was not found.\n"
722 "%i (%3.2f%%) targets had parameter count mismatch.\n"
723 "%i (%3.2f%%) targets was not in polymorphic call target list.\n"
724 "%i (%3.2f%%) speculations seems useless.\n"
725 "%i (%3.2f%%) speculations produced.\n",
726 nindirect,
727 ncommon, ncommon * 100.0 / nindirect,
728 nunknown, nunknown * 100.0 / nindirect,
729 nmismatch, nmismatch * 100.0 / nindirect,
730 nimpossible, nimpossible * 100.0 / nindirect,
731 nuseless, nuseless * 100.0 / nindirect,
732 nconverted, nconverted * 100.0 / nindirect);
    /* Stage 3: propagate frequencies in reverse postorder; a changed node
       re-queues its local callees (via the AUX flag) until fixpoint.  */
734 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
735 order_pos = ipa_reverse_postorder (order);
736 for (i = order_pos - 1; i >= 0; i--)
738 if (order[i]->local.local
739 && opt_for_fn (order[i]->decl, flag_ipa_profile)
740 && ipa_propagate_frequency (order[i]))
742 for (e = order[i]->callees; e; e = e->next_callee)
743 if (e->callee->local.local && !e->callee->aux)
745 something_changed = true;
746 e->callee->aux = (void *)1;
749 order[i]->aux = NULL;
752 while (something_changed)
754 something_changed = false;
755 for (i = order_pos - 1; i >= 0; i--)
757 if (order[i]->aux
758 && opt_for_fn (order[i]->decl, flag_ipa_profile)
759 && ipa_propagate_frequency (order[i]))
761 for (e = order[i]->callees; e; e = e->next_callee)
762 if (e->callee->local.local && !e->callee->aux)
764 something_changed = true;
765 e->callee->aux = (void *)1;
768 order[i]->aux = NULL;
771 free (order);
772 return 0;
775 namespace {
777 const pass_data pass_data_ipa_profile =
779 IPA_PASS, /* type */
780 "profile_estimate", /* name */
781 OPTGROUP_NONE, /* optinfo_flags */
782 TV_IPA_PROFILE, /* tv_id */
783 0, /* properties_required */
784 0, /* properties_provided */
785 0, /* properties_destroyed */
786 0, /* todo_flags_start */
787 0, /* todo_flags_finish */
790 class pass_ipa_profile : public ipa_opt_pass_d
792 public:
793 pass_ipa_profile (gcc::context *ctxt)
794 : ipa_opt_pass_d (pass_data_ipa_profile, ctxt,
795 ipa_profile_generate_summary, /* generate_summary */
796 ipa_profile_write_summary, /* write_summary */
797 ipa_profile_read_summary, /* read_summary */
798 NULL, /* write_optimization_summary */
799 NULL, /* read_optimization_summary */
800 NULL, /* stmt_fixup */
801 0, /* function_transform_todo_flags_start */
802 NULL, /* function_transform */
803 NULL) /* variable_transform */
806 /* opt_pass methods: */
807 virtual bool gate (function *) { return flag_ipa_profile || in_lto_p; }
808 virtual unsigned int execute (function *) { return ipa_profile (); }
810 }; // class pass_ipa_profile
812 } // anon namespace
814 ipa_opt_pass_d *
815 make_pass_ipa_profile (gcc::context *ctxt)
817 return new pass_ipa_profile (ctxt);