gcc/ipa-profile.c
/* Basic IPA optimizations based on profile.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* The ipa-profile pass implements the following analyses, propagating profile
   information inter-procedurally.

   - Count histogram construction.  This is a histogram analyzing how much
     time is spent executing statements with a given execution count read
     from profile feedback.  This histogram is complete only with LTO,
     otherwise it contains information only about the current unit.

     A similar histogram is also estimated by the coverage runtime.  That
     histogram does not depend on LTO, but it suffers from various defects:
     first, the gcov runtime does not weight individual basic blocks by
     estimated execution time, and second, merging multiple runs assumes
     that the histogram distribution did not change.  Consequently the
     histogram constructed here may be more precise.

     The information is used to set hot/cold thresholds.
   - Next, speculative indirect call resolution is performed: the local
     profile pass assigns a profile-id to each function and provides us with
     a histogram specifying the most common target.  We look up the callgraph
     node corresponding to the target and produce a speculative call.

     This call may or may not survive IPA optimization, based on the decision
     of the inliner.
   - Finally we propagate the following flags: unlikely executed, executed
     once, executed at startup and executed at exit.  These flags are used to
     control code size/performance thresholds and code placement (by producing
     .text.unlikely/.text.hot/.text.startup/.text.exit subsections).  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "cgraph.h"
#include "data-streamer.h"
#include "gimple-iterator.h"
#include "ipa-utils.h"
#include "profile.h"
#include "params.h"
#include "value-prof.h"
#include "tree-inline.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
/* Entry in the histogram.  */

struct histogram_entry
{
  gcov_type count;
  int time;
  int size;
};

/* Histogram of profile values.
   The histogram is represented as an ordered vector of entries allocated via
   histogram_pool.  During construction a separate hashtable is kept to look
   up duplicate entries.  */

vec<histogram_entry *> histogram;
static object_allocator<histogram_entry> histogram_pool ("IPA histogram");
/* Hashtable support for storing histogram entries hashed by their count.  */

struct histogram_hash : nofree_ptr_hash <histogram_entry>
{
  static inline hashval_t hash (const histogram_entry *);
  static inline int equal (const histogram_entry *, const histogram_entry *);
};

inline hashval_t
histogram_hash::hash (const histogram_entry *val)
{
  return val->count;
}

inline int
histogram_hash::equal (const histogram_entry *val, const histogram_entry *val2)
{
  return val->count == val2->count;
}
/* Account TIME and SIZE executed COUNT times into HISTOGRAM.
   HASHTABLE is the side hashtable kept to avoid duplicate entries.  */

static void
account_time_size (hash_table<histogram_hash> *hashtable,
                   vec<histogram_entry *> &histogram,
                   gcov_type count, int time, int size)
{
  histogram_entry key = {count, 0, 0};
  histogram_entry **val = hashtable->find_slot (&key, INSERT);

  if (!*val)
    {
      *val = histogram_pool.allocate ();
      **val = key;
      histogram.safe_push (*val);
    }
  (*val)->time += time;
  (*val)->size += size;
}
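/* qsort comparator ordering histogram entries by decreasing execution count,
   so the hottest counts come first.  */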
int
cmp_counts (const void *v1, const void *v2)
{
  const histogram_entry *h1 = *(const histogram_entry * const *)v1;
  const histogram_entry *h2 = *(const histogram_entry * const *)v2;
  if (h1->count < h2->count)
    return 1;
  if (h1->count > h2->count)
    return -1;
  return 0;
}
/* Dump HISTOGRAM to FILE.  */

static void
dump_histogram (FILE *file, vec<histogram_entry *> histogram)
{
  unsigned int i;
  gcov_type overall_time = 0, cumulated_time = 0, cumulated_size = 0,
            overall_size = 0;

  fprintf (file, "Histogram:\n");
  for (i = 0; i < histogram.length (); i++)
    {
      overall_time += histogram[i]->count * histogram[i]->time;
      overall_size += histogram[i]->size;
    }
  if (!overall_time)
    overall_time = 1;
  if (!overall_size)
    overall_size = 1;
  for (i = 0; i < histogram.length (); i++)
    {
      cumulated_time += histogram[i]->count * histogram[i]->time;
      cumulated_size += histogram[i]->size;
      fprintf (file, "  %" PRId64": time:%i (%2.2f) size:%i (%2.2f)\n",
               (int64_t) histogram[i]->count,
               histogram[i]->time,
               cumulated_time * 100.0 / overall_time,
               histogram[i]->size,
               cumulated_size * 100.0 / overall_size);
    }
}
/* Collect histogram from CFG profiles.  */

static void
ipa_profile_generate_summary (void)
{
  struct cgraph_node *node;
  gimple_stmt_iterator gsi;
  basic_block bb;

  hash_table<histogram_hash> hashtable (10);

  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    if (ENTRY_BLOCK_PTR_FOR_FN (DECL_STRUCT_FUNCTION (node->decl))->count.ipa_p ())
      FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
        {
          int time = 0;
          int size = 0;
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              if (gimple_code (stmt) == GIMPLE_CALL
                  && !gimple_call_fndecl (stmt))
                {
                  histogram_value h;
                  h = gimple_histogram_value_of_type
                        (DECL_STRUCT_FUNCTION (node->decl),
                         stmt, HIST_TYPE_INDIR_CALL);
                  /* No need to do a sanity check: gimple_ic_transform already
                     takes away bad histograms.  */
                  if (h)
                    {
                      /* Counter 0 is the target, counter 1 is the number of
                         executions that called the target, counter 2 is the
                         total number of executions.  */
                      if (h->hvalue.counters[2])
                        {
                          struct cgraph_edge * e = node->get_edge (stmt);
                          if (e && !e->indirect_unknown_callee)
                            continue;
                          e->indirect_info->common_target_id
                            = h->hvalue.counters [0];
                          e->indirect_info->common_target_probability
                            = GCOV_COMPUTE_SCALE (h->hvalue.counters [1],
                                                  h->hvalue.counters [2]);
                          if (e->indirect_info->common_target_probability
                              > REG_BR_PROB_BASE)
                            {
                              if (dump_file)
                                fprintf (dump_file, "Probability capped to 1\n");
                              e->indirect_info->common_target_probability
                                = REG_BR_PROB_BASE;
                            }
                        }
                      gimple_remove_histogram_value (DECL_STRUCT_FUNCTION (node->decl),
                                                     stmt, h);
                    }
                }
              time += estimate_num_insns (stmt, &eni_time_weights);
              size += estimate_num_insns (stmt, &eni_size_weights);
            }
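          /* Only blocks with an initialized, IPA-quality profile count
             contribute to the histogram.  */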
          if (bb->count.ipa_p () && bb->count.initialized_p ())
            account_time_size (&hashtable, histogram,
                               bb->count.ipa ().to_gcov_type (),
                               time, size);
        }
  histogram.qsort (cmp_counts);
}
/* Serialize the ipa info for lto.  */

static void
ipa_profile_write_summary (void)
{
  struct lto_simple_output_block *ob
    = lto_create_simple_output_block (LTO_section_ipa_profile);
  unsigned int i;

  streamer_write_uhwi_stream (ob->main_stream, histogram.length ());
  for (i = 0; i < histogram.length (); i++)
    {
      streamer_write_gcov_count_stream (ob->main_stream, histogram[i]->count);
      streamer_write_uhwi_stream (ob->main_stream, histogram[i]->time);
      streamer_write_uhwi_stream (ob->main_stream, histogram[i]->size);
    }
  lto_destroy_simple_output_block (ob);
}
/* Deserialize the ipa info for lto.  */

static void
ipa_profile_read_summary (void)
{
  struct lto_file_decl_data ** file_data_vec
    = lto_get_file_decl_data ();
  struct lto_file_decl_data * file_data;
  int j = 0;

  hash_table<histogram_hash> hashtable (10);
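  /* Merge the histogram entries streamed out by each LTO object file.  */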
  while ((file_data = file_data_vec[j++]))
    {
      const char *data;
      size_t len;
      struct lto_input_block *ib
        = lto_create_simple_input_block (file_data,
                                         LTO_section_ipa_profile,
                                         &data, &len);
      if (ib)
        {
          unsigned int num = streamer_read_uhwi (ib);
          unsigned int n;
          for (n = 0; n < num; n++)
            {
              gcov_type count = streamer_read_gcov_count (ib);
              int time = streamer_read_uhwi (ib);
              int size = streamer_read_uhwi (ib);
              account_time_size (&hashtable, histogram,
                                 count, time, size);
            }
          lto_destroy_simple_input_block (file_data,
                                          LTO_section_ipa_profile,
                                          ib, data, len);
        }
    }
  histogram.qsort (cmp_counts);
}
/* Data used by ipa_propagate_frequency.  */

struct ipa_propagate_frequency_data
{
  cgraph_node *function_symbol;
  bool maybe_unlikely_executed;
  bool maybe_executed_once;
  bool only_called_at_startup;
  bool only_called_at_exit;
};
/* Worker for ipa_propagate_frequency.  */

static bool
ipa_propagate_frequency_1 (struct cgraph_node *node, void *data)
{
  struct ipa_propagate_frequency_data *d;
  struct cgraph_edge *edge;

  d = (struct ipa_propagate_frequency_data *)data;
  for (edge = node->callers;
       edge && (d->maybe_unlikely_executed || d->maybe_executed_once
                || d->only_called_at_startup || d->only_called_at_exit);
       edge = edge->next_caller)
    {
      if (edge->caller != d->function_symbol)
        {
          d->only_called_at_startup &= edge->caller->only_called_at_startup;
          /* It makes sense to put main() together with the static
             constructors.  It will be executed for sure, but the rest of the
             functions called from main are definitely not called only at
             startup.  */
          if (MAIN_NAME_P (DECL_NAME (edge->caller->decl)))
            d->only_called_at_startup = 0;
          d->only_called_at_exit &= edge->caller->only_called_at_exit;
        }

      /* When profile feedback is available, do not try to propagate too hard;
         counts are already a good guide to function frequencies and roundoff
         errors can push a function into the unlikely section even when it is
         executed by the train run.  Transfer the function only if all callers
         are unlikely executed.  */
      if (profile_info
          && edge->callee->count.initialized_p ()
          /* Thunks are not profiled.  This is more or less an implementation
             bug.  */
          && !d->function_symbol->thunk.thunk_p
          && (edge->caller->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED
              || (edge->caller->global.inlined_to
                  && edge->caller->global.inlined_to->frequency
                     != NODE_FREQUENCY_UNLIKELY_EXECUTED)))
        d->maybe_unlikely_executed = false;
      if (!edge->frequency)
        continue;
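      /* Update the candidate flags according to the frequency class of the
         caller: a normal or hot caller rules out both "unlikely executed"
         and "executed once".  */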
      switch (edge->caller->frequency)
        {
        case NODE_FREQUENCY_UNLIKELY_EXECUTED:
          break;
        case NODE_FREQUENCY_EXECUTED_ONCE:
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Called by %s that is executed once\n",
                     edge->caller->name ());
          d->maybe_unlikely_executed = false;
          if (ipa_call_summaries->get (edge)->loop_depth)
            {
              d->maybe_executed_once = false;
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Called in loop\n");
            }
          break;
        case NODE_FREQUENCY_HOT:
        case NODE_FREQUENCY_NORMAL:
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Called by %s that is normal or hot\n",
                     edge->caller->name ());
          d->maybe_unlikely_executed = false;
          d->maybe_executed_once = false;
          break;
        }
    }
  return edge != NULL;
}
/* Return true if NODE contains hot calls.  */

bool
contains_hot_call_p (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    if (e->maybe_hot_p ())
      return true;
    else if (!e->inline_failed
             && contains_hot_call_p (e->callee))
      return true;
  for (e = node->indirect_calls; e; e = e->next_callee)
    if (e->maybe_hot_p ())
      return true;
  return false;
}
/* See if the frequency of NODE can be updated based on frequencies of its
   callers.  */
bool
ipa_propagate_frequency (struct cgraph_node *node)
{
  struct ipa_propagate_frequency_data d = {node, true, true, true, true};
  bool changed = false;

  /* We cannot propagate anything useful about externally visible functions
     nor about virtuals.  */
  if (!node->local.local
      || node->alias
      || (opt_for_fn (node->decl, flag_devirtualize)
          && DECL_VIRTUAL_P (node->decl)))
    return false;
  gcc_assert (node->analyzed);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Processing frequency %s\n", node->name ());

  node->call_for_symbol_and_aliases (ipa_propagate_frequency_1, &d,
                                     true);

  if ((d.only_called_at_startup && !d.only_called_at_exit)
      && !node->only_called_at_startup)
    {
      node->only_called_at_startup = true;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to only called at startup.\n",
                 node->name ());
      changed = true;
    }
  if ((d.only_called_at_exit && !d.only_called_at_startup)
      && !node->only_called_at_exit)
    {
      node->only_called_at_exit = true;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to only called at exit.\n",
                 node->name ());
      changed = true;
    }

  /* With profile we can decide on hot/normal based on count.  */
  if (node->count.initialized_p ())
    {
      bool hot = false;
      if (!(node->count == profile_count::zero ())
          && node->count >= get_hot_bb_threshold ())
        hot = true;
      if (!hot)
        hot |= contains_hot_call_p (node);
      if (hot)
        {
          if (node->frequency != NODE_FREQUENCY_HOT)
            {
              if (dump_file)
                fprintf (dump_file, "Node %s promoted to hot.\n",
                         node->name ());
              node->frequency = NODE_FREQUENCY_HOT;
              return true;
            }
          return false;
        }
      else if (node->frequency == NODE_FREQUENCY_HOT)
        {
          if (dump_file)
            fprintf (dump_file, "Node %s reduced to normal.\n",
                     node->name ());
          node->frequency = NODE_FREQUENCY_NORMAL;
          changed = true;
        }
    }
  /* These come either from profile or user hints; never update them.  */
  if (node->frequency == NODE_FREQUENCY_HOT
      || node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
    return changed;
  if (d.maybe_unlikely_executed)
    {
      node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to unlikely executed.\n",
                 node->name ());
      changed = true;
    }
  else if (d.maybe_executed_once
           && node->frequency != NODE_FREQUENCY_EXECUTED_ONCE)
    {
      node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      if (dump_file)
        fprintf (dump_file, "Node %s promoted to executed once.\n",
                 node->name ());
      changed = true;
    }
  return changed;
}
/* Simple ipa profile pass propagating frequencies across the callgraph.  */

static unsigned int
ipa_profile (void)
{
  struct cgraph_node **order;
  struct cgraph_edge *e;
  int order_pos;
  bool something_changed = false;
  int i;
  gcov_type overall_time = 0, cutoff = 0, cumulated = 0, overall_size = 0;
  struct cgraph_node *n, *n2;
  int nindirect = 0, ncommon = 0, nunknown = 0, nuseless = 0, nconverted = 0;
  int nmismatch = 0, nimpossible = 0;
  bool node_map_initialized = false;

  if (dump_file)
    dump_histogram (dump_file, histogram);
  for (i = 0; i < (int)histogram.length (); i++)
    {
      overall_time += histogram[i]->count * histogram[i]->time;
      overall_size += histogram[i]->size;
    }
  if (overall_time)
    {
      gcov_type threshold;

      gcc_assert (overall_size);
      if (dump_file)
        {
          gcov_type min, cumulated_time = 0, cumulated_size = 0;

          fprintf (dump_file, "Overall time: %" PRId64"\n",
                   (int64_t)overall_time);
          min = get_hot_bb_threshold ();
          for (i = 0;
               i < (int)histogram.length () && histogram[i]->count >= min;
               i++)
            {
              cumulated_time += histogram[i]->count * histogram[i]->time;
              cumulated_size += histogram[i]->size;
            }
          fprintf (dump_file, "GCOV min count: %" PRId64
                   " Time:%3.2f%% Size:%3.2f%%\n",
                   (int64_t)min,
                   cumulated_time * 100.0 / overall_time,
                   cumulated_size * 100.0 / overall_size);
        }
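      /* Find the count threshold at which statements executed at least that
         many times cover HOT_BB_COUNT_WS_PERMILLE per mille of the total
         profiled time (the cutoff is rounded to the nearest unit).  */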
      cutoff = (overall_time * PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE) + 500) / 1000;
      threshold = 0;
      for (i = 0; cumulated < cutoff; i++)
        {
          cumulated += histogram[i]->count * histogram[i]->time;
          threshold = histogram[i]->count;
        }
      if (!threshold)
        threshold = 1;
      if (dump_file)
        {
          gcov_type cumulated_time = 0, cumulated_size = 0;

          for (i = 0;
               i < (int)histogram.length () && histogram[i]->count >= threshold;
               i++)
            {
              cumulated_time += histogram[i]->count * histogram[i]->time;
              cumulated_size += histogram[i]->size;
            }
          fprintf (dump_file, "Determined min count: %" PRId64
                   " Time:%3.2f%% Size:%3.2f%%\n",
                   (int64_t)threshold,
                   cumulated_time * 100.0 / overall_time,
                   cumulated_size * 100.0 / overall_size);
        }
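      /* Raise the hot BB threshold when the histogram suggests a larger one;
         with LTO the threshold computed from the merged, whole-program
         histogram is installed unconditionally.  */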
      if (threshold > get_hot_bb_threshold ()
          || in_lto_p)
        {
          if (dump_file)
            fprintf (dump_file, "Threshold updated.\n");
          set_hot_bb_threshold (threshold);
        }
    }
  histogram.release ();
  histogram_pool.release ();

  /* Produce speculative calls: we saved the common target from profiling
     into e->common_target_id.  Now, at link time, we can look up the
     corresponding function node and produce a speculative call.  */

  FOR_EACH_DEFINED_FUNCTION (n)
    {
      bool update = false;

      if (!opt_for_fn (n->decl, flag_ipa_profile))
        continue;

      for (e = n->indirect_calls; e; e = e->next_callee)
        {
          if (n->count.initialized_p ())
            nindirect++;
          if (e->indirect_info->common_target_id)
            {
              if (!node_map_initialized)
                init_node_map (false);
              node_map_initialized = true;
              ncommon++;
              n2 = find_func_by_profile_id (e->indirect_info->common_target_id);
              if (n2)
                {
                  if (dump_file)
                    {
                      fprintf (dump_file, "Indirect call -> direct call from"
                               " other module %s => %s, prob %3.2f\n",
                               n->dump_name (),
                               n2->dump_name (),
                               e->indirect_info->common_target_probability
                               / (float)REG_BR_PROB_BASE);
                    }
                  if (e->indirect_info->common_target_probability
                      < REG_BR_PROB_BASE / 2)
                    {
                      nuseless++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: probability is too low.\n");
                    }
                  else if (!e->maybe_hot_p ())
                    {
                      nuseless++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: call is cold.\n");
                    }
                  else if (n2->get_availability () <= AVAIL_INTERPOSABLE
                           && n2->can_be_discarded_p ())
                    {
                      nuseless++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: target is overwritable "
                                 "and can be discarded.\n");
                    }
                  else if (ipa_node_params_sum && ipa_edge_args_sum
                           && (!vec_safe_is_empty
                               (IPA_NODE_REF (n2)->descriptors))
                           && ipa_get_param_count (IPA_NODE_REF (n2))
                              != ipa_get_cs_argument_count (IPA_EDGE_REF (e))
                           && (ipa_get_param_count (IPA_NODE_REF (n2))
                               >= ipa_get_cs_argument_count (IPA_EDGE_REF (e))
                               || !stdarg_p (TREE_TYPE (n2->decl))))
                    {
                      nmismatch++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: "
                                 "parameter count mismatch\n");
                    }
                  else if (e->indirect_info->polymorphic
                           && !opt_for_fn (n->decl, flag_devirtualize)
                           && !possible_polymorphic_call_target_p (e, n2))
                    {
                      nimpossible++;
                      if (dump_file)
                        fprintf (dump_file,
                                 "Not speculating: "
                                 "function is not in the polymorphic "
                                 "call target list\n");
                    }
                  else
                    {
                      /* Target may be overwritable, but profile says that
                         control flow goes to this particular implementation
                         of N2.  Speculate on the local alias to allow
                         inlining.  */
                      if (!n2->can_be_discarded_p ())
                        {
                          cgraph_node *alias;
                          alias = dyn_cast<cgraph_node *> (n2->noninterposable_alias ());
                          if (alias)
                            n2 = alias;
                        }
                      nconverted++;
                      e->make_speculative
                        (n2,
                         e->count.apply_probability
                           (e->indirect_info->common_target_probability),
                         apply_scale (e->frequency,
                                      e->indirect_info->common_target_probability));
                      update = true;
                    }
                }
              else
                {
                  if (dump_file)
                    fprintf (dump_file, "Function with profile-id %i not found.\n",
                             e->indirect_info->common_target_id);
                  nunknown++;
                }
            }
        }
      if (update)
        ipa_update_overall_fn_summary (n);
    }
  if (node_map_initialized)
    del_node_map ();
  if (dump_file && nindirect)
    fprintf (dump_file,
             "%i indirect calls trained.\n"
             "%i (%3.2f%%) have a common target.\n"
             "%i (%3.2f%%) targets were not found.\n"
             "%i (%3.2f%%) targets had a parameter count mismatch.\n"
             "%i (%3.2f%%) targets were not in the polymorphic call target list.\n"
             "%i (%3.2f%%) speculations seem useless.\n"
             "%i (%3.2f%%) speculations produced.\n",
             nindirect,
             ncommon, ncommon * 100.0 / nindirect,
             nunknown, nunknown * 100.0 / nindirect,
             nmismatch, nmismatch * 100.0 / nindirect,
             nimpossible, nimpossible * 100.0 / nindirect,
             nuseless, nuseless * 100.0 / nindirect,
             nconverted, nconverted * 100.0 / nindirect);
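  /* Propagate frequencies over the callgraph in reverse postorder.  The aux
     pointer marks callees whose callers changed and therefore need to be
     revisited until a fixed point is reached.  */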
  order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  order_pos = ipa_reverse_postorder (order);
  for (i = order_pos - 1; i >= 0; i--)
    {
      if (order[i]->local.local
          && opt_for_fn (order[i]->decl, flag_ipa_profile)
          && ipa_propagate_frequency (order[i]))
        {
          for (e = order[i]->callees; e; e = e->next_callee)
            if (e->callee->local.local && !e->callee->aux)
              {
                something_changed = true;
                e->callee->aux = (void *)1;
              }
        }
      order[i]->aux = NULL;
    }

  while (something_changed)
    {
      something_changed = false;
      for (i = order_pos - 1; i >= 0; i--)
        {
          if (order[i]->aux
              && opt_for_fn (order[i]->decl, flag_ipa_profile)
              && ipa_propagate_frequency (order[i]))
            {
              for (e = order[i]->callees; e; e = e->next_callee)
                if (e->callee->local.local && !e->callee->aux)
                  {
                    something_changed = true;
                    e->callee->aux = (void *)1;
                  }
            }
          order[i]->aux = NULL;
        }
    }
  free (order);
  return 0;
}
namespace {

const pass_data pass_data_ipa_profile =
{
  IPA_PASS, /* type */
  "profile_estimate", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_IPA_PROFILE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_profile : public ipa_opt_pass_d
{
public:
  pass_ipa_profile (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_profile, ctxt,
                      ipa_profile_generate_summary, /* generate_summary */
                      ipa_profile_write_summary, /* write_summary */
                      ipa_profile_read_summary, /* read_summary */
                      NULL, /* write_optimization_summary */
                      NULL, /* read_optimization_summary */
                      NULL, /* stmt_fixup */
                      0, /* function_transform_todo_flags_start */
                      NULL, /* function_transform */
                      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_ipa_profile || in_lto_p; }
  virtual unsigned int execute (function *) { return ipa_profile (); }

}; // class pass_ipa_profile

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_profile (gcc::context *ctxt)
{
  return new pass_ipa_profile (ctxt);
}