/* Basic IPA optimizations based on profile.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* The ipa-profile pass implements the following analyses propagating profile
   information inter-procedurally:

   - Count histogram construction.  This is a histogram analyzing how much
     time is spent executing statements with a given execution count read
     from profile feedback.  This histogram is complete only with LTO,
     otherwise it contains information only about the current unit.

     A similar histogram is also estimated by the coverage runtime.  That
     histogram does not depend on LTO, but it suffers from various defects:
     first, the gcov runtime does not weight individual basic blocks by their
     estimated execution time, and second, merging multiple runs assumes that
     the histogram distribution did not change.  Consequently the histogram
     constructed here may be more precise.

     The information is used to set hot/cold thresholds.
   - Next, speculative indirect call resolution is performed: the local
     profile pass assigns a profile-id to each function and provides us with a
     histogram specifying the most common target.  We look up the callgraph
     node corresponding to the target and produce a speculative call.

     This call may or may not survive through IPA optimization, based on the
     decision of the inliner.
   - Finally we propagate the following flags: unlikely executed, executed
     once, executed at startup and executed at exit.  These flags are used to
     control code size/performance thresholds and code placement (by producing
     .text.unlikely/.text.hot/.text.startup/.text.exit subsections).  */
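/* Example of the hot/cold threshold computation performed by this pass: if
   the histogram records that statements executed 10000, 1000 and 10 times
   account for 70%, 29% and 1% of the estimated execution time, a working-set
   cutoff of 990 permille selects 1000 as the new hot count threshold, since
   the first two buckets already cover 99% of the time.  (Numbers are
   illustrative only; the actual permille value is taken from
   --param hot-bb-count-ws-permille.)  */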
49 #include "coretypes.h"
54 #include "alloc-pool.h"
55 #include "tree-pass.h"
57 #include "data-streamer.h"
58 #include "gimple-iterator.h"
59 #include "ipa-utils.h"
62 #include "value-prof.h"
63 #include "tree-inline.h"
64 #include "symbol-summary.h"
67 #include "ipa-inline.h"
/* Entry in the histogram.  */

struct histogram_entry
{
  gcov_type count;
  int time;
  int size;
};

/* Histogram of profile values.
   The histogram is represented as an ordered vector of entries allocated via
   histogram_pool.  During construction a separate hashtable is kept to look up
   duplicate entries.  */

vec<histogram_entry *> histogram;
static object_allocator<histogram_entry> histogram_pool ("IPA histogram");
/* Hashtable support for storing histogram entries keyed by their execution
   count.  */

struct histogram_hash : nofree_ptr_hash <histogram_entry>
{
  static inline hashval_t hash (const histogram_entry *);
  static inline int equal (const histogram_entry *, const histogram_entry *);
};

inline hashval_t
histogram_hash::hash (const histogram_entry *val)
{
  return val->count;
}

inline int
histogram_hash::equal (const histogram_entry *val, const histogram_entry *val2)
{
  return val->count == val2->count;
}
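/* Because entries hash and compare by COUNT alone, all statements that share
   an execution count are merged into a single histogram_entry whose TIME and
   SIZE fields accumulate; the vector thus ends up with one bucket per
   distinct execution count.  */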
/* Account TIME and SIZE executed COUNT times into HISTOGRAM.
   HASHTABLE is the on-side hash kept to avoid duplicates.  */

static void
account_time_size (hash_table<histogram_hash> *hashtable,
		   vec<histogram_entry *> &histogram,
		   gcov_type count, int time, int size)
{
  histogram_entry key = {count, 0, 0};
  histogram_entry **val = hashtable->find_slot (&key, INSERT);

  if (!*val)
    {
      *val = histogram_pool.allocate ();
      **val = key;
      histogram.safe_push (*val);
    }
  (*val)->time += time;
  (*val)->size += size;
}
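/* For instance, a basic block executed 1000 times whose statements have an
   estimated time of 5 and size of 3 either creates the bucket for count 1000
   or adds 5 and 3 to the existing bucket's accumulated time and size.  */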
/* Compare histogram entries by decreasing count so that the hottest entries
   sort first.  Used as a qsort callback.  */

static int
cmp_counts (const void *v1, const void *v2)
{
  const histogram_entry *h1 = *(const histogram_entry * const *)v1;
  const histogram_entry *h2 = *(const histogram_entry * const *)v2;
  if (h1->count < h2->count)
    return 1;
  if (h1->count > h2->count)
    return -1;
  return 0;
}
/* Dump HISTOGRAM to FILE.  */

static void
dump_histogram (FILE *file, vec<histogram_entry *> histogram)
{
  unsigned int i;
  gcov_type overall_time = 0, cumulated_time = 0, cumulated_size = 0,
	    overall_size = 0;

  fprintf (file, "Histogram:\n");
  for (i = 0; i < histogram.length (); i++)
    {
      overall_time += histogram[i]->count * histogram[i]->time;
      overall_size += histogram[i]->size;
    }
  if (!overall_time)
    overall_time = 1;
  if (!overall_size)
    overall_size = 1;
  for (i = 0; i < histogram.length (); i++)
    {
      cumulated_time += histogram[i]->count * histogram[i]->time;
      cumulated_size += histogram[i]->size;
      fprintf (file, "  %" PRId64": time:%i (%2.2f) size:%i (%2.2f)\n",
	       (int64_t) histogram[i]->count,
	       histogram[i]->time,
	       cumulated_time * 100.0 / overall_time,
	       histogram[i]->size,
	       cumulated_size * 100.0 / overall_size);
    }
}
/* Collect histogram from CFG profiles.  */

static void
ipa_profile_generate_summary (void)
{
  struct cgraph_node *node;
  gimple_stmt_iterator gsi;
  basic_block bb;

  hash_table<histogram_hash> hashtable (10);

  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
      {
	int time = 0;
	int size = 0;
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  {
	    gimple *stmt = gsi_stmt (gsi);
	    if (gimple_code (stmt) == GIMPLE_CALL
		&& !gimple_call_fndecl (stmt))
	      {
		histogram_value h;
		h = gimple_histogram_value_of_type
		      (DECL_STRUCT_FUNCTION (node->decl),
		       stmt, HIST_TYPE_INDIR_CALL);
		/* No need to do sanity check: gimple_ic_transform already
		   takes away bad histograms.  */
		if (h)
		  {
		    /* Counter 0 is the target, counter 1 is the number of
		       executions calling that target, counter 2 is the total
		       number of executions.  */
		    if (h->hvalue.counters[2])
		      {
			struct cgraph_edge * e = node->get_edge (stmt);
			if (e && !e->indirect_unknown_callee)
			  continue;
			e->indirect_info->common_target_id
			  = h->hvalue.counters[0];
			e->indirect_info->common_target_probability
			  = GCOV_COMPUTE_SCALE (h->hvalue.counters[1],
						h->hvalue.counters[2]);
			if (e->indirect_info->common_target_probability
			    > REG_BR_PROB_BASE)
			  {
			    if (dump_file)
			      fprintf (dump_file, "Probability capped to 1\n");
			    e->indirect_info->common_target_probability
			      = REG_BR_PROB_BASE;
			  }
		      }
		    gimple_remove_histogram_value (DECL_STRUCT_FUNCTION (node->decl),
						   stmt, h);
		  }
	      }
	    time += estimate_num_insns (stmt, &eni_time_weights);
	    size += estimate_num_insns (stmt, &eni_size_weights);
	  }
	account_time_size (&hashtable, histogram, bb->count, time, size);
      }
  histogram.qsort (cmp_counts);
}
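/* Example of the indirect-call counters consumed above: a value profile of
   {5678, 90, 100} records that the function whose profile-id is 5678 received
   90 of the 100 dynamic calls made from this call site, so
   common_target_probability becomes 90% of REG_BR_PROB_BASE.  (Numbers are
   illustrative only.)  */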
/* Serialize the ipa info for lto.  */

static void
ipa_profile_write_summary (void)
{
  struct lto_simple_output_block *ob
    = lto_create_simple_output_block (LTO_section_ipa_profile);
  unsigned int i;

  streamer_write_uhwi_stream (ob->main_stream, histogram.length ());
  for (i = 0; i < histogram.length (); i++)
    {
      streamer_write_gcov_count_stream (ob->main_stream, histogram[i]->count);
      streamer_write_uhwi_stream (ob->main_stream, histogram[i]->time);
      streamer_write_uhwi_stream (ob->main_stream, histogram[i]->size);
    }
  lto_destroy_simple_output_block (ob);
}
/* Deserialize the ipa info for lto.  */

static void
ipa_profile_read_summary (void)
{
  struct lto_file_decl_data ** file_data_vec
    = lto_get_file_decl_data ();
  struct lto_file_decl_data * file_data;
  int j = 0;

  hash_table<histogram_hash> hashtable (10);

  while ((file_data = file_data_vec[j++]))
    {
      const char *data;
      size_t len;
      struct lto_input_block *ib
	= lto_create_simple_input_block (file_data,
					 LTO_section_ipa_profile,
					 &data, &len);
      if (ib)
	{
	  unsigned int num = streamer_read_uhwi (ib);
	  unsigned int n;
	  for (n = 0; n < num; n++)
	    {
	      gcov_type count = streamer_read_gcov_count (ib);
	      int time = streamer_read_uhwi (ib);
	      int size = streamer_read_uhwi (ib);
	      account_time_size (&hashtable, histogram,
				 count, time, size);
	    }
	  lto_destroy_simple_input_block (file_data,
					  LTO_section_ipa_profile,
					  ib, data, len);
	}
    }
  histogram.qsort (cmp_counts);
}
/* Data used by ipa_propagate_frequency.  */

struct ipa_propagate_frequency_data
{
  cgraph_node *function_symbol;
  bool maybe_unlikely_executed;
  bool maybe_executed_once;
  bool only_called_at_startup;
  bool only_called_at_exit;
};
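/* The flags start out true and are cleared while walking the callers of
   FUNCTION_SYMBOL in ipa_propagate_frequency_1; whatever survives the walk
   can safely be asserted about the function itself.  */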
/* Worker for ipa_propagate_frequency.  */

static bool
ipa_propagate_frequency_1 (struct cgraph_node *node, void *data)
{
  struct ipa_propagate_frequency_data *d;
  struct cgraph_edge *edge;

  d = (struct ipa_propagate_frequency_data *)data;
  for (edge = node->callers;
       edge && (d->maybe_unlikely_executed || d->maybe_executed_once
		|| d->only_called_at_startup || d->only_called_at_exit);
       edge = edge->next_caller)
    {
      if (edge->caller != d->function_symbol)
	{
	  d->only_called_at_startup &= edge->caller->only_called_at_startup;
	  /* It makes sense to put main() together with the static constructors.
	     It will be executed for sure, but the rest of the functions called
	     from main are definitely not at startup only.  */
	  if (MAIN_NAME_P (DECL_NAME (edge->caller->decl)))
	    d->only_called_at_startup = 0;
	  d->only_called_at_exit &= edge->caller->only_called_at_exit;
	}

      /* When profile feedback is available, do not try to propagate too hard;
	 counts are already a good guide on function frequencies and roundoff
	 errors can push a function into the unlikely section even when it is
	 executed by the train run.  Transfer the function only if all callers
	 are unlikely executed.  */
      if (profile_info
	  && opt_for_fn (d->function_symbol->decl, flag_branch_probabilities)
	  /* Thunks are not profiled.  This is more or less an implementation
	     bug.  */
	  && !d->function_symbol->thunk.thunk_p
	  && (edge->caller->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED
	      || (edge->caller->global.inlined_to
		  && edge->caller->global.inlined_to->frequency
		     != NODE_FREQUENCY_UNLIKELY_EXECUTED)))
	d->maybe_unlikely_executed = false;
      if (!edge->frequency)
	continue;
      switch (edge->caller->frequency)
	{
	case NODE_FREQUENCY_UNLIKELY_EXECUTED:
	  break;
	case NODE_FREQUENCY_EXECUTED_ONCE:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Called by %s that is executed once\n",
		     edge->caller->name ());
	  d->maybe_unlikely_executed = false;
	  if (inline_edge_summary (edge)->loop_depth)
	    {
	      d->maybe_executed_once = false;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "  Called in loop\n");
	    }
	  break;
	case NODE_FREQUENCY_HOT:
	case NODE_FREQUENCY_NORMAL:
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Called by %s that is normal or hot\n",
		     edge->caller->name ());
	  d->maybe_unlikely_executed = false;
	  d->maybe_executed_once = false;
	  break;
	}
    }
  return edge != NULL;
}
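/* Note the meet semantics above: a single caller that is not itself only
   called at startup clears only_called_at_startup, and a single hot or normal
   caller clears both maybe_unlikely_executed and maybe_executed_once; the
   caller walk stops early once every flag has been cleared.  */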
/* Return true if NODE contains hot calls.  */

bool
contains_hot_call_p (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    if (e->maybe_hot_p ())
      return true;
    else if (!e->inline_failed
	     && contains_hot_call_p (e->callee))
      return true;
  for (e = node->indirect_calls; e; e = e->next_callee)
    if (e->maybe_hot_p ())
      return true;
  return false;
}
/* See if the frequency of NODE can be updated based on the frequencies of its
   callers.  */
bool
ipa_propagate_frequency (struct cgraph_node *node)
{
  struct ipa_propagate_frequency_data d = {node, true, true, true, true};
  bool changed = false;

  /* We cannot propagate anything useful about externally visible functions
     nor about virtuals.  */
  if (!node->local.local
      || node->alias
      || (opt_for_fn (node->decl, flag_devirtualize)
	  && DECL_VIRTUAL_P (node->decl)))
    return false;
  gcc_assert (node->analyzed);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Processing frequency %s\n", node->name ());

  node->call_for_symbol_and_aliases (ipa_propagate_frequency_1, &d,
				     true);

  if ((d.only_called_at_startup && !d.only_called_at_exit)
      && !node->only_called_at_startup)
    {
      node->only_called_at_startup = true;
      if (dump_file)
	fprintf (dump_file, "Node %s promoted to only called at startup.\n",
		 node->name ());
      changed = true;
    }
  if ((d.only_called_at_exit && !d.only_called_at_startup)
      && !node->only_called_at_exit)
    {
      node->only_called_at_exit = true;
      if (dump_file)
	fprintf (dump_file, "Node %s promoted to only called at exit.\n",
		 node->name ());
      changed = true;
    }

  /* With profile we can decide on hot/normal based on count.  */
  if (node->count)
    {
      bool hot = false;
      if (node->count >= get_hot_bb_threshold ())
	hot = true;
      if (!hot)
	hot |= contains_hot_call_p (node);
      if (hot)
	{
	  if (node->frequency != NODE_FREQUENCY_HOT)
	    {
	      if (dump_file)
		fprintf (dump_file, "Node %s promoted to hot.\n",
			 node->name ());
	      node->frequency = NODE_FREQUENCY_HOT;
	      return true;
	    }
	  return false;
	}
      else if (node->frequency == NODE_FREQUENCY_HOT)
	{
	  if (dump_file)
	    fprintf (dump_file, "Node %s reduced to normal.\n",
		     node->name ());
	  node->frequency = NODE_FREQUENCY_NORMAL;
	  changed = true;
	}
    }
  /* These come either from profile or user hints; never update them.  */
  if (node->frequency == NODE_FREQUENCY_HOT
      || node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
    return changed;
  if (d.maybe_unlikely_executed)
    {
      node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
      if (dump_file)
	fprintf (dump_file, "Node %s promoted to unlikely executed.\n",
		 node->name ());
      changed = true;
    }
  else if (d.maybe_executed_once
	   && node->frequency != NODE_FREQUENCY_EXECUTED_ONCE)
    {
      node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
      if (dump_file)
	fprintf (dump_file, "Node %s promoted to executed once.\n",
		 node->name ());
      changed = true;
    }
  return changed;
}
/* Simple ipa profile pass propagating frequencies across the callgraph.  */

static unsigned int
ipa_profile (void)
{
  struct cgraph_node **order;
  struct cgraph_edge *e;
  int order_pos;
  bool something_changed = false;
  int i;
  gcov_type overall_time = 0, cutoff = 0, cumulated = 0, overall_size = 0;
  struct cgraph_node *n,*n2;
  int nindirect = 0, ncommon = 0, nunknown = 0, nuseless = 0, nconverted = 0;
  int nmismatch = 0, nimpossible = 0;
  bool node_map_initialized = false;

  if (dump_file)
    dump_histogram (dump_file, histogram);
  for (i = 0; i < (int)histogram.length (); i++)
    {
      overall_time += histogram[i]->count * histogram[i]->time;
      overall_size += histogram[i]->size;
    }
  if (overall_time)
    {
      gcov_type threshold;

      gcc_assert (overall_size);
      if (dump_file)
	{
	  gcov_type min, cumulated_time = 0, cumulated_size = 0;

	  fprintf (dump_file, "Overall time: %" PRId64"\n",
		   (int64_t)overall_time);
	  min = get_hot_bb_threshold ();
	  for (i = 0; i < (int)histogram.length () && histogram[i]->count >= min;
	       i++)
	    {
	      cumulated_time += histogram[i]->count * histogram[i]->time;
	      cumulated_size += histogram[i]->size;
	    }
	  fprintf (dump_file, "GCOV min count: %" PRId64
		   " Time:%3.2f%% Size:%3.2f%%\n",
		   (int64_t)min,
		   cumulated_time * 100.0 / overall_time,
		   cumulated_size * 100.0 / overall_size);
	}
      cutoff = (overall_time * PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE) + 500) / 1000;
      threshold = 0;
      for (i = 0; cumulated < cutoff; i++)
	{
	  cumulated += histogram[i]->count * histogram[i]->time;
	  threshold = histogram[i]->count;
	}
      if (!threshold)
	threshold = 1;
      if (dump_file)
	{
	  gcov_type cumulated_time = 0, cumulated_size = 0;

	  for (i = 0;
	       i < (int)histogram.length () && histogram[i]->count >= threshold;
	       i++)
	    {
	      cumulated_time += histogram[i]->count * histogram[i]->time;
	      cumulated_size += histogram[i]->size;
	    }
	  fprintf (dump_file, "Determined min count: %" PRId64
		   " Time:%3.2f%% Size:%3.2f%%\n",
		   (int64_t)threshold,
		   cumulated_time * 100.0 / overall_time,
		   cumulated_size * 100.0 / overall_size);
	}
      if (threshold > get_hot_bb_threshold ()
	  || in_lto_p)
	{
	  if (dump_file)
	    fprintf (dump_file, "Threshold updated.\n");
	  set_hot_bb_threshold (threshold);
	}
    }
  histogram.release ();
  histogram_pool.release ();
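  /* At this point the histogram and its allocation pool are no longer
     needed: the computed hot-count threshold has been recorded via
     set_hot_bb_threshold and everything that follows works on the
     callgraph only.  */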
  /* Produce speculative calls: we saved the common target from profiling into
     e->common_target_id.  Now, at link time, we can look up the corresponding
     function node and produce a speculative call.  */

  FOR_EACH_DEFINED_FUNCTION (n)
    {
      bool update = false;

      if (!opt_for_fn (n->decl, flag_ipa_profile))
	continue;

      for (e = n->indirect_calls; e; e = e->next_callee)
	{
	  if (n->count)
	    nindirect++;
	  if (e->indirect_info->common_target_id)
	    {
	      if (!node_map_initialized)
		init_node_map (false);
	      node_map_initialized = true;
	      ncommon++;
	      n2 = find_func_by_profile_id (e->indirect_info->common_target_id);
	      if (n2)
		{
		  if (dump_file)
		    fprintf (dump_file, "Indirect call -> direct call from"
			     " other module %s/%i => %s/%i, prob %3.2f\n",
			     xstrdup_for_dump (n->name ()), n->order,
			     xstrdup_for_dump (n2->name ()), n2->order,
			     e->indirect_info->common_target_probability
			     / (float)REG_BR_PROB_BASE);
		  if (e->indirect_info->common_target_probability
		      < REG_BR_PROB_BASE / 2)
		    {
		      nuseless++;
		      if (dump_file)
			fprintf (dump_file,
				 "Not speculating: probability is too low.\n");
		    }
		  else if (!e->maybe_hot_p ())
		    {
		      nuseless++;
		      if (dump_file)
			fprintf (dump_file,
				 "Not speculating: call is cold.\n");
		    }
		  else if (n2->get_availability () <= AVAIL_INTERPOSABLE
			   && n2->can_be_discarded_p ())
		    {
		      nuseless++;
		      if (dump_file)
			fprintf (dump_file,
				 "Not speculating: target is overwritable "
				 "and can be discarded.\n");
		    }
		  else if (ipa_node_params_sum && ipa_edge_args_vector
			   && (!vec_safe_is_empty
			       (IPA_NODE_REF (n2)->descriptors))
			   && ipa_get_param_count (IPA_NODE_REF (n2))
			      != ipa_get_cs_argument_count (IPA_EDGE_REF (e))
			   && (ipa_get_param_count (IPA_NODE_REF (n2))
			       >= ipa_get_cs_argument_count (IPA_EDGE_REF (e))
			       || !stdarg_p (TREE_TYPE (n2->decl))))
		    {
		      nmismatch++;
		      if (dump_file)
			fprintf (dump_file,
				 "Not speculating: "
				 "parameter count mismatch\n");
		    }
		  else if (e->indirect_info->polymorphic
			   && !opt_for_fn (n->decl, flag_devirtualize)
			   && !possible_polymorphic_call_target_p (e, n2))
		    {
		      nimpossible++;
		      if (dump_file)
			fprintf (dump_file,
				 "Not speculating: "
				 "function is not in the polymorphic "
				 "call target list\n");
		    }
		  else
		    {
		      /* Target may be overwritable, but profile says that
			 control flow goes to this particular implementation
			 of N2.  Speculate on the local alias to allow
			 inlining.  */
		      if (!n2->can_be_discarded_p ())
			{
			  cgraph_node *alias;
			  alias = dyn_cast<cgraph_node *> (n2->noninterposable_alias ());
			  if (alias)
			    n2 = alias;
			}
		      nconverted++;
		      e->make_speculative
			(n2,
			 apply_scale (e->count,
				      e->indirect_info->common_target_probability),
			 apply_scale (e->frequency,
				      e->indirect_info->common_target_probability));
		      update = true;
		    }
		}
	      else
		{
		  if (dump_file)
		    fprintf (dump_file, "Function with profile-id %i not found.\n",
			     e->indirect_info->common_target_id);
		  nunknown++;
		}
	    }
	}
      if (update)
	inline_update_overall_summary (n);
    }
  if (node_map_initialized)
    del_node_map ();
  if (dump_file && nindirect)
    fprintf (dump_file,
	     "%i indirect calls trained.\n"
	     "%i (%3.2f%%) have common target.\n"
	     "%i (%3.2f%%) targets were not found.\n"
	     "%i (%3.2f%%) targets had parameter count mismatch.\n"
	     "%i (%3.2f%%) targets were not in the polymorphic call target list.\n"
	     "%i (%3.2f%%) speculations seem useless.\n"
	     "%i (%3.2f%%) speculations produced.\n",
	     nindirect,
	     ncommon, ncommon * 100.0 / nindirect,
	     nunknown, nunknown * 100.0 / nindirect,
	     nmismatch, nmismatch * 100.0 / nindirect,
	     nimpossible, nimpossible * 100.0 / nindirect,
	     nuseless, nuseless * 100.0 / nindirect,
	     nconverted, nconverted * 100.0 / nindirect);
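  /* Finally, propagate node frequencies.  Nodes are visited in reverse
     postorder; whenever propagation changes a node, its local callees are
     marked (via the aux pointer) so the worklist loop below revisits them
     until no further changes occur.  */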
  order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  order_pos = ipa_reverse_postorder (order);
  for (i = order_pos - 1; i >= 0; i--)
    {
      if (order[i]->local.local
	  && opt_for_fn (order[i]->decl, flag_ipa_profile)
	  && ipa_propagate_frequency (order[i]))
	{
	  for (e = order[i]->callees; e; e = e->next_callee)
	    if (e->callee->local.local && !e->callee->aux)
	      {
		something_changed = true;
		e->callee->aux = (void *)1;
	      }
	}
      order[i]->aux = NULL;
    }

  while (something_changed)
    {
      something_changed = false;
      for (i = order_pos - 1; i >= 0; i--)
	{
	  if (order[i]->aux
	      && opt_for_fn (order[i]->decl, flag_ipa_profile)
	      && ipa_propagate_frequency (order[i]))
	    {
	      for (e = order[i]->callees; e; e = e->next_callee)
		if (e->callee->local.local && !e->callee->aux)
		  {
		    something_changed = true;
		    e->callee->aux = (void *)1;
		  }
	    }
	  order[i]->aux = NULL;
	}
    }
  free (order);
  return 0;
}
namespace {

const pass_data pass_data_ipa_profile =
{
  IPA_PASS, /* type */
  "profile_estimate", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_IPA_PROFILE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_profile : public ipa_opt_pass_d
{
public:
  pass_ipa_profile (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_profile, ctxt,
		      ipa_profile_generate_summary, /* generate_summary */
		      ipa_profile_write_summary, /* write_summary */
		      ipa_profile_read_summary, /* read_summary */
		      NULL, /* write_optimization_summary */
		      NULL, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      NULL, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_ipa_profile || in_lto_p; }
  virtual unsigned int execute (function *) { return ipa_profile (); }

}; // class pass_ipa_profile

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_profile (gcc::context *ctxt)
{
  return new pass_ipa_profile (ctxt);
}