PR target/60104
gcc/predict.c (official-gcc.git)
1 /* Branch prediction routines for the GNU compiler.
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* References:
22 [1] "Branch Prediction for Free"
23 Ball and Larus; PLDI '93.
24 [2] "Static Branch Frequency and Program Profile Analysis"
25 Wu and Larus; MICRO-27.
26 [3] "Corpus-based Static Branch Prediction"
27 Calder, Grunwald, Lindsay, Martin, Mozer, and Zorn; PLDI '95. */
30 #include "config.h"
31 #include "system.h"
32 #include "coretypes.h"
33 #include "tm.h"
34 #include "tree.h"
35 #include "calls.h"
36 #include "rtl.h"
37 #include "tm_p.h"
38 #include "hard-reg-set.h"
39 #include "predict.h"
40 #include "vec.h"
41 #include "hashtab.h"
42 #include "hash-set.h"
43 #include "machmode.h"
44 #include "input.h"
45 #include "function.h"
46 #include "dominance.h"
47 #include "cfg.h"
48 #include "cfganal.h"
49 #include "basic-block.h"
50 #include "insn-config.h"
51 #include "regs.h"
52 #include "flags.h"
53 #include "profile.h"
54 #include "except.h"
55 #include "diagnostic-core.h"
56 #include "recog.h"
57 #include "expr.h"
58 #include "coverage.h"
59 #include "sreal.h"
60 #include "params.h"
61 #include "target.h"
62 #include "cfgloop.h"
63 #include "hash-map.h"
64 #include "tree-ssa-alias.h"
65 #include "internal-fn.h"
66 #include "gimple-expr.h"
67 #include "is-a.h"
68 #include "gimple.h"
69 #include "gimple-iterator.h"
70 #include "gimple-ssa.h"
71 #include "plugin-api.h"
72 #include "ipa-ref.h"
73 #include "cgraph.h"
74 #include "tree-cfg.h"
75 #include "tree-phinodes.h"
76 #include "ssa-iterators.h"
77 #include "tree-ssa-loop-niter.h"
78 #include "tree-ssa-loop.h"
79 #include "tree-pass.h"
80 #include "tree-scalar-evolution.h"
81 #include "cfgloop.h"
83 /* real constants: 0, 1, 1-1/REG_BR_PROB_BASE, REG_BR_PROB_BASE,
84 1/REG_BR_PROB_BASE, 0.5, BB_FREQ_MAX. */
85 static sreal real_zero, real_one, real_almost_one, real_br_prob_base,
86 real_inv_br_prob_base, real_one_half, real_bb_freq_max;
88 static void combine_predictions_for_insn (rtx_insn *, basic_block);
89 static void dump_prediction (FILE *, enum br_predictor, int, basic_block, int);
90 static void predict_paths_leading_to (basic_block, enum br_predictor, enum prediction);
91 static void predict_paths_leading_to_edge (edge, enum br_predictor, enum prediction);
92 static bool can_predict_insn_p (const rtx_insn *);
94 /* Information we hold about each branch predictor.
95 Filled using information from predict.def. */
97 struct predictor_info
99 const char *const name; /* Name used in the debugging dumps. */
100 const int hitrate; /* Expected hitrate used by
101 predict_insn_def call. */
102 const int flags;
/* Use the given predictor without Dempster-Shafer theory if it matches
   using the first_match heuristics.  */
107 #define PRED_FLAG_FIRST_MATCH 1
/* Convert a hitrate given in percent to our REG_BR_PROB_BASE representation.  */
111 #define HITRATE(VAL) ((int) ((VAL) * REG_BR_PROB_BASE + 50) / 100)
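/* As a quick illustration: with REG_BR_PROB_BASE equal to 10000,
   HITRATE (50) is 5000 and HITRATE (99) is 9900, i.e. a percentage is
   simply rescaled (with rounding) into REG_BR_PROB_BASE units.  */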
113 #define DEF_PREDICTOR(ENUM, NAME, HITRATE, FLAGS) {NAME, HITRATE, FLAGS},
114 static const struct predictor_info predictor_info[]= {
115 #include "predict.def"
117 /* Upper bound on predictors. */
118 {NULL, 0, 0}
120 #undef DEF_PREDICTOR
122 /* Return TRUE if frequency FREQ is considered to be hot. */
124 static inline bool
125 maybe_hot_frequency_p (struct function *fun, int freq)
127 struct cgraph_node *node = cgraph_node::get (fun->decl);
128 if (!profile_info || !flag_branch_probabilities)
130 if (node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED)
131 return false;
132 if (node->frequency == NODE_FREQUENCY_HOT)
133 return true;
135 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
136 return true;
137 if (node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
138 && freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency * 2 / 3))
139 return false;
140 if (PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION) == 0)
141 return false;
142 if (freq < (ENTRY_BLOCK_PTR_FOR_FN (fun)->frequency
143 / PARAM_VALUE (HOT_BB_FREQUENCY_FRACTION)))
144 return false;
145 return true;
148 static gcov_type min_count = -1;
150 /* Determine the threshold for hot BB counts. */
152 gcov_type
153 get_hot_bb_threshold ()
155 gcov_working_set_t *ws;
156 if (min_count == -1)
158 ws = find_working_set (PARAM_VALUE (HOT_BB_COUNT_WS_PERMILLE));
159 gcc_assert (ws);
160 min_count = ws->min_counter;
162 return min_count;
165 /* Set the threshold for hot BB counts. */
167 void
168 set_hot_bb_threshold (gcov_type min)
170 min_count = min;
/* Return TRUE if count COUNT is considered to be hot.  */
175 bool
176 maybe_hot_count_p (struct function *fun, gcov_type count)
178 if (fun && profile_status_for_fn (fun) != PROFILE_READ)
179 return true;
180 /* Code executed at most once is not hot. */
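/* For instance, with profile_info->runs equal to 100, a count of 100 or
   less means the block executed at most once per training run on average,
   so it is not considered hot.  */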
181 if (profile_info->runs >= count)
182 return false;
183 return (count >= get_hot_bb_threshold ());
186 /* Return true in case BB can be CPU intensive and should be optimized
187 for maximal performance. */
189 bool
190 maybe_hot_bb_p (struct function *fun, const_basic_block bb)
192 gcc_checking_assert (fun);
193 if (profile_status_for_fn (fun) == PROFILE_READ)
194 return maybe_hot_count_p (fun, bb->count);
195 return maybe_hot_frequency_p (fun, bb->frequency);
/* Return true in case edge E can be CPU intensive and should be optimized
   for maximal performance.  */
201 bool
202 maybe_hot_edge_p (edge e)
204 if (profile_status_for_fn (cfun) == PROFILE_READ)
205 return maybe_hot_count_p (cfun, e->count);
206 return maybe_hot_frequency_p (cfun, EDGE_FREQUENCY (e));
/* Return true if profile COUNT and FREQUENCY, or function FUN's static
   node frequency, reflect never being executed.  */
212 static bool
213 probably_never_executed (struct function *fun,
214 gcov_type count, int frequency)
216 gcc_checking_assert (fun);
217 if (profile_status_for_fn (cfun) == PROFILE_READ)
219 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
220 if (count * unlikely_count_fraction >= profile_info->runs)
221 return false;
222 if (!frequency)
223 return true;
224 if (!ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency)
225 return false;
226 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
228 gcov_type computed_count;
229 /* Check for possibility of overflow, in which case entry bb count
230 is large enough to do the division first without losing much
231 precision. */
232 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count < REG_BR_PROB_BASE *
233 REG_BR_PROB_BASE)
235 gcov_type scaled_count
236 = frequency * ENTRY_BLOCK_PTR_FOR_FN (cfun)->count *
237 unlikely_count_fraction;
238 computed_count = RDIV (scaled_count,
239 ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
241 else
243 computed_count = RDIV (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count,
244 ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency);
245 computed_count *= frequency * unlikely_count_fraction;
247 if (computed_count >= profile_info->runs)
248 return false;
250 return true;
252 if ((!profile_info || !flag_branch_probabilities)
253 && (cgraph_node::get (fun->decl)->frequency
254 == NODE_FREQUENCY_UNLIKELY_EXECUTED))
255 return true;
256 return false;
260 /* Return true in case BB is probably never executed. */
262 bool
263 probably_never_executed_bb_p (struct function *fun, const_basic_block bb)
265 return probably_never_executed (fun, bb->count, bb->frequency);
269 /* Return true in case edge E is probably never executed. */
271 bool
272 probably_never_executed_edge_p (struct function *fun, edge e)
274 return probably_never_executed (fun, e->count, EDGE_FREQUENCY (e));
277 /* Return true when current function should always be optimized for size. */
279 bool
280 optimize_function_for_size_p (struct function *fun)
282 if (optimize_size)
283 return true;
284 if (!fun || !fun->decl)
285 return false;
287 cgraph_node *n = cgraph_node::get (fun->decl);
288 return n && n->optimize_for_size_p ();
291 /* Return true when current function should always be optimized for speed. */
293 bool
294 optimize_function_for_speed_p (struct function *fun)
296 return !optimize_function_for_size_p (fun);
299 /* Return TRUE when BB should be optimized for size. */
301 bool
302 optimize_bb_for_size_p (const_basic_block bb)
304 return (optimize_function_for_size_p (cfun)
305 || (bb && !maybe_hot_bb_p (cfun, bb)));
308 /* Return TRUE when BB should be optimized for speed. */
310 bool
311 optimize_bb_for_speed_p (const_basic_block bb)
313 return !optimize_bb_for_size_p (bb);
/* Return TRUE when edge E should be optimized for size.  */
318 bool
319 optimize_edge_for_size_p (edge e)
321 return optimize_function_for_size_p (cfun) || !maybe_hot_edge_p (e);
/* Return TRUE when edge E should be optimized for speed.  */
326 bool
327 optimize_edge_for_speed_p (edge e)
329 return !optimize_edge_for_size_p (e);
/* Return TRUE when the current instruction should be optimized for size.  */
334 bool
335 optimize_insn_for_size_p (void)
337 return optimize_function_for_size_p (cfun) || !crtl->maybe_hot_insn_p;
/* Return TRUE when the current instruction should be optimized for speed.  */
342 bool
343 optimize_insn_for_speed_p (void)
345 return !optimize_insn_for_size_p ();
348 /* Return TRUE when LOOP should be optimized for size. */
350 bool
351 optimize_loop_for_size_p (struct loop *loop)
353 return optimize_bb_for_size_p (loop->header);
356 /* Return TRUE when LOOP should be optimized for speed. */
358 bool
359 optimize_loop_for_speed_p (struct loop *loop)
361 return optimize_bb_for_speed_p (loop->header);
364 /* Return TRUE when LOOP nest should be optimized for speed. */
366 bool
367 optimize_loop_nest_for_speed_p (struct loop *loop)
369 struct loop *l = loop;
370 if (optimize_loop_for_speed_p (loop))
371 return true;
372 l = loop->inner;
373 while (l && l != loop)
375 if (optimize_loop_for_speed_p (l))
376 return true;
377 if (l->inner)
378 l = l->inner;
379 else if (l->next)
380 l = l->next;
381 else
383 while (l != loop && !l->next)
384 l = loop_outer (l);
385 if (l != loop)
386 l = l->next;
389 return false;
392 /* Return TRUE when LOOP nest should be optimized for size. */
394 bool
395 optimize_loop_nest_for_size_p (struct loop *loop)
397 return !optimize_loop_nest_for_speed_p (loop);
400 /* Return true when edge E is likely to be well predictable by branch
401 predictor. */
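/* As a worked example, assuming the default value of
   PARAM_PREDICTABLE_BRANCH_OUTCOME is 2: edge probabilities of at most 2%
   (<= 200 in REG_BR_PROB_BASE units) or at least 98% (>= 9800) make the
   edge count as well predictable.  */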
403 bool
404 predictable_edge_p (edge e)
406 if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
407 return false;
408 if ((e->probability
409 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100)
410 || (REG_BR_PROB_BASE - e->probability
411 <= PARAM_VALUE (PARAM_PREDICTABLE_BRANCH_OUTCOME) * REG_BR_PROB_BASE / 100))
412 return true;
413 return false;
417 /* Set RTL expansion for BB profile. */
419 void
420 rtl_profile_for_bb (basic_block bb)
422 crtl->maybe_hot_insn_p = maybe_hot_bb_p (cfun, bb);
425 /* Set RTL expansion for edge profile. */
427 void
428 rtl_profile_for_edge (edge e)
430 crtl->maybe_hot_insn_p = maybe_hot_edge_p (e);
433 /* Set RTL expansion to default mode (i.e. when profile info is not known). */
434 void
435 default_rtl_profile (void)
437 crtl->maybe_hot_insn_p = true;
/* Return true if one of the outgoing edges is already predicted by
   PREDICTOR.  */
443 bool
444 rtl_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
446 rtx note;
447 if (!INSN_P (BB_END (bb)))
448 return false;
449 for (note = REG_NOTES (BB_END (bb)); note; note = XEXP (note, 1))
450 if (REG_NOTE_KIND (note) == REG_BR_PRED
451 && INTVAL (XEXP (XEXP (note, 0), 0)) == (int)predictor)
452 return true;
453 return false;
/* Structure representing predictions at the tree level.  */
458 struct edge_prediction {
459 struct edge_prediction *ep_next;
460 edge ep_edge;
461 enum br_predictor ep_predictor;
462 int ep_probability;
465 /* This map contains for a basic block the list of predictions for the
466 outgoing edges. */
468 static hash_map<const_basic_block, edge_prediction *> *bb_predictions;
/* Return true if one of the outgoing edges is already predicted by
   PREDICTOR.  */
473 bool
474 gimple_predicted_by_p (const_basic_block bb, enum br_predictor predictor)
476 struct edge_prediction *i;
477 edge_prediction **preds = bb_predictions->get (bb);
479 if (!preds)
480 return false;
482 for (i = *preds; i; i = i->ep_next)
483 if (i->ep_predictor == predictor)
484 return true;
485 return false;
/* Return true when the probability of edge is reliable.

   The profile guessing code is good at predicting the branch outcome (i.e.
   taken/not taken); it is predicted correctly slightly over 75% of the time.
   It is, however, notoriously poor at predicting the probability itself.
   In general the guessed profile appears a lot flatter (with probabilities
   closer to 50%) than reality, so it is a bad idea to use it to drive
   optimizations such as those disabling dynamic branch prediction for well
   predictable branches.

   There are two exceptions: edges leading to noreturn edges and edges
   predicted by the number-of-iterations heuristics are predicted well.  This
   function should be able to distinguish those, but at the moment it simply
   checks for the noreturn heuristic, which is the only one giving a
   probability over 99% or below 1%.  In the future we might want to propagate
   reliability information across the CFG if we find this information useful
   in multiple places.  */
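/* In REG_BR_PROB_BASE units, the cutoffs mentioned above are
   HITRATE (1) == 100 and HITRATE (99) == 9900, so only guessed
   probabilities of at most 1% or at least 99% are treated as reliable.  */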
504 static bool
505 probability_reliable_p (int prob)
507 return (profile_status_for_fn (cfun) == PROFILE_READ
508 || (profile_status_for_fn (cfun) == PROFILE_GUESSED
509 && (prob <= HITRATE (1) || prob >= HITRATE (99))));
512 /* Same predicate as above, working on edges. */
513 bool
514 edge_probability_reliable_p (const_edge e)
516 return probability_reliable_p (e->probability);
519 /* Same predicate as edge_probability_reliable_p, working on notes. */
520 bool
521 br_prob_note_reliable_p (const_rtx note)
523 gcc_assert (REG_NOTE_KIND (note) == REG_BR_PROB);
524 return probability_reliable_p (XINT (note, 0));
527 static void
528 predict_insn (rtx_insn *insn, enum br_predictor predictor, int probability)
530 gcc_assert (any_condjump_p (insn));
531 if (!flag_guess_branch_prob)
532 return;
534 add_reg_note (insn, REG_BR_PRED,
535 gen_rtx_CONCAT (VOIDmode,
536 GEN_INT ((int) predictor),
537 GEN_INT ((int) probability)));
540 /* Predict insn by given predictor. */
542 void
543 predict_insn_def (rtx_insn *insn, enum br_predictor predictor,
544 enum prediction taken)
546 int probability = predictor_info[(int) predictor].hitrate;
548 if (taken != TAKEN)
549 probability = REG_BR_PROB_BASE - probability;
551 predict_insn (insn, predictor, probability);
554 /* Predict edge E with given probability if possible. */
556 void
557 rtl_predict_edge (edge e, enum br_predictor predictor, int probability)
559 rtx_insn *last_insn;
560 last_insn = BB_END (e->src);
562 /* We can store the branch prediction information only about
563 conditional jumps. */
564 if (!any_condjump_p (last_insn))
565 return;
567 /* We always store probability of branching. */
568 if (e->flags & EDGE_FALLTHRU)
569 probability = REG_BR_PROB_BASE - probability;
571 predict_insn (last_insn, predictor, probability);
574 /* Predict edge E with the given PROBABILITY. */
575 void
576 gimple_predict_edge (edge e, enum br_predictor predictor, int probability)
578 gcc_assert (profile_status_for_fn (cfun) != PROFILE_GUESSED);
579 if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun) && EDGE_COUNT (e->src->succs) >
581 && flag_guess_branch_prob && optimize)
583 struct edge_prediction *i = XNEW (struct edge_prediction);
584 edge_prediction *&preds = bb_predictions->get_or_insert (e->src);
586 i->ep_next = preds;
587 preds = i;
588 i->ep_probability = probability;
589 i->ep_predictor = predictor;
590 i->ep_edge = e;
594 /* Remove all predictions on given basic block that are attached
595 to edge E. */
596 void
597 remove_predictions_associated_with_edge (edge e)
599 if (!bb_predictions)
600 return;
602 edge_prediction **preds = bb_predictions->get (e->src);
604 if (preds)
606 struct edge_prediction **prediction = preds;
607 struct edge_prediction *next;
609 while (*prediction)
611 if ((*prediction)->ep_edge == e)
613 next = (*prediction)->ep_next;
614 free (*prediction);
615 *prediction = next;
617 else
618 prediction = &((*prediction)->ep_next);
623 /* Clears the list of predictions stored for BB. */
625 static void
626 clear_bb_predictions (basic_block bb)
628 edge_prediction **preds = bb_predictions->get (bb);
629 struct edge_prediction *pred, *next;
631 if (!preds)
632 return;
634 for (pred = *preds; pred; pred = next)
636 next = pred->ep_next;
637 free (pred);
639 *preds = NULL;
/* Return true when we can store a prediction on insn INSN.
   At the moment we represent predictions only on conditional
   jumps, not on computed jumps or other complicated cases.  */
645 static bool
646 can_predict_insn_p (const rtx_insn *insn)
648 return (JUMP_P (insn)
649 && any_condjump_p (insn)
650 && EDGE_COUNT (BLOCK_FOR_INSN (insn)->succs) >= 2);
653 /* Predict edge E by given predictor if possible. */
655 void
656 predict_edge_def (edge e, enum br_predictor predictor,
657 enum prediction taken)
659 int probability = predictor_info[(int) predictor].hitrate;
661 if (taken != TAKEN)
662 probability = REG_BR_PROB_BASE - probability;
664 predict_edge (e, predictor, probability);
667 /* Invert all branch predictions or probability notes in the INSN. This needs
668 to be done each time we invert the condition used by the jump. */
670 void
671 invert_br_probabilities (rtx insn)
673 rtx note;
675 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
676 if (REG_NOTE_KIND (note) == REG_BR_PROB)
677 XINT (note, 0) = REG_BR_PROB_BASE - XINT (note, 0);
678 else if (REG_NOTE_KIND (note) == REG_BR_PRED)
679 XEXP (XEXP (note, 0), 1)
680 = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (XEXP (note, 0), 1)));
683 /* Dump information about the branch prediction to the output file. */
685 static void
686 dump_prediction (FILE *file, enum br_predictor predictor, int probability,
687 basic_block bb, int used)
689 edge e;
690 edge_iterator ei;
692 if (!file)
693 return;
695 FOR_EACH_EDGE (e, ei, bb->succs)
696 if (! (e->flags & EDGE_FALLTHRU))
697 break;
699 fprintf (file, " %s heuristics%s: %.1f%%",
700 predictor_info[predictor].name,
701 used ? "" : " (ignored)", probability * 100.0 / REG_BR_PROB_BASE);
703 if (bb->count)
705 fprintf (file, " exec %"PRId64, bb->count);
706 if (e)
708 fprintf (file, " hit %"PRId64, e->count);
709 fprintf (file, " (%.1f%%)", e->count * 100.0 / bb->count);
713 fprintf (file, "\n");
/* We cannot predict the probabilities of the outgoing edges of BB.  Set them
   evenly and hope for the best.  */
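/* For example, a block with three non-EH, non-fake successors gets each
   such edge set to (REG_BR_PROB_BASE + 1) / 3 == 3333, i.e. roughly one
   third.  */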
718 static void
719 set_even_probabilities (basic_block bb)
721 int nedges = 0;
722 edge e;
723 edge_iterator ei;
725 FOR_EACH_EDGE (e, ei, bb->succs)
726 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
727 nedges ++;
728 FOR_EACH_EDGE (e, ei, bb->succs)
729 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
730 e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
731 else
732 e->probability = 0;
/* Combine all REG_BR_PRED notes into a single probability and attach a
   REG_BR_PROB note if not already present.  Remove the now useless
   REG_BR_PRED notes.  */
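/* A sketch of the Dempster-Shafer combination used below, in
   REG_BR_PROB_BASE units: starting from the neutral 5000 and combining
   with a predictor of 7000,
     d = 5000*7000 + 5000*3000 = 50000000
     combined = 5000*7000*10000 / d = 7000;
   combining that with a second predictor of 8000,
     d = 7000*8000 + 3000*2000 = 62000000
     combined = 7000*8000*10000 / d ~= 9032,
   so two agreeing predictors reinforce each other.  */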
738 static void
739 combine_predictions_for_insn (rtx_insn *insn, basic_block bb)
741 rtx prob_note;
742 rtx *pnote;
743 rtx note;
744 int best_probability = PROB_EVEN;
745 enum br_predictor best_predictor = END_PREDICTORS;
746 int combined_probability = REG_BR_PROB_BASE / 2;
747 int d;
748 bool first_match = false;
749 bool found = false;
751 if (!can_predict_insn_p (insn))
753 set_even_probabilities (bb);
754 return;
757 prob_note = find_reg_note (insn, REG_BR_PROB, 0);
758 pnote = &REG_NOTES (insn);
759 if (dump_file)
760 fprintf (dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
761 bb->index);
/* We implement the "first match" heuristic and use the probability guessed
   by the predictor with the smallest index.  */
765 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
766 if (REG_NOTE_KIND (note) == REG_BR_PRED)
768 enum br_predictor predictor = ((enum br_predictor)
769 INTVAL (XEXP (XEXP (note, 0), 0)));
770 int probability = INTVAL (XEXP (XEXP (note, 0), 1));
772 found = true;
773 if (best_predictor > predictor)
774 best_probability = probability, best_predictor = predictor;
776 d = (combined_probability * probability
777 + (REG_BR_PROB_BASE - combined_probability)
778 * (REG_BR_PROB_BASE - probability));
780 /* Use FP math to avoid overflows of 32bit integers. */
781 if (d == 0)
782 /* If one probability is 0% and one 100%, avoid division by zero. */
783 combined_probability = REG_BR_PROB_BASE / 2;
784 else
785 combined_probability = (((double) combined_probability) * probability
786 * REG_BR_PROB_BASE / d + 0.5);
/* Decide which heuristic to use.  If we didn't match anything, use the
   no_prediction heuristic; if we did match, use either first match or
   Dempster-Shafer theory depending on the flags.  */
793 if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH)
794 first_match = true;
796 if (!found)
797 dump_prediction (dump_file, PRED_NO_PREDICTION,
798 combined_probability, bb, true);
799 else
801 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability,
802 bb, !first_match);
803 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability,
804 bb, first_match);
807 if (first_match)
808 combined_probability = best_probability;
809 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
811 while (*pnote)
813 if (REG_NOTE_KIND (*pnote) == REG_BR_PRED)
815 enum br_predictor predictor = ((enum br_predictor)
816 INTVAL (XEXP (XEXP (*pnote, 0), 0)));
817 int probability = INTVAL (XEXP (XEXP (*pnote, 0), 1));
819 dump_prediction (dump_file, predictor, probability, bb,
820 !first_match || best_predictor == predictor);
821 *pnote = XEXP (*pnote, 1);
823 else
824 pnote = &XEXP (*pnote, 1);
827 if (!prob_note)
829 add_int_reg_note (insn, REG_BR_PROB, combined_probability);
/* Save the prediction into the CFG in case we are seeing a non-degenerate
   conditional jump.  */
833 if (!single_succ_p (bb))
835 BRANCH_EDGE (bb)->probability = combined_probability;
836 FALLTHRU_EDGE (bb)->probability
837 = REG_BR_PROB_BASE - combined_probability;
840 else if (!single_succ_p (bb))
842 int prob = XINT (prob_note, 0);
844 BRANCH_EDGE (bb)->probability = prob;
845 FALLTHRU_EDGE (bb)->probability = REG_BR_PROB_BASE - prob;
847 else
848 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
/* Combine the predictions into a single probability and store it into the
   CFG.  Remove the now useless prediction entries.  */
854 static void
855 combine_predictions_for_bb (basic_block bb)
857 int best_probability = PROB_EVEN;
858 enum br_predictor best_predictor = END_PREDICTORS;
859 int combined_probability = REG_BR_PROB_BASE / 2;
860 int d;
861 bool first_match = false;
862 bool found = false;
863 struct edge_prediction *pred;
864 int nedges = 0;
865 edge e, first = NULL, second = NULL;
866 edge_iterator ei;
868 FOR_EACH_EDGE (e, ei, bb->succs)
869 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
871 nedges ++;
872 if (first && !second)
873 second = e;
874 if (!first)
875 first = e;
/* When there is no successor or only one choice, prediction is easy.

   We are lazy for now and predict only basic blocks with two outgoing
   edges.  It is possible to predict the generic case too, but we have to
   ignore the first match heuristics and do more involved combining.
   Implement this later.  */
884 if (nedges != 2)
886 if (!bb->count)
887 set_even_probabilities (bb);
888 clear_bb_predictions (bb);
889 if (dump_file)
890 fprintf (dump_file, "%i edges in bb %i predicted to even probabilities\n",
891 nedges, bb->index);
892 return;
895 if (dump_file)
896 fprintf (dump_file, "Predictions for bb %i\n", bb->index);
898 edge_prediction **preds = bb_predictions->get (bb);
899 if (preds)
/* We implement the "first match" heuristic and use the probability guessed
   by the predictor with the smallest index.  */
903 for (pred = *preds; pred; pred = pred->ep_next)
905 enum br_predictor predictor = pred->ep_predictor;
906 int probability = pred->ep_probability;
908 if (pred->ep_edge != first)
909 probability = REG_BR_PROB_BASE - probability;
911 found = true;
/* The first match heuristic would be wildly confused if we predicted
   both directions.  */
914 if (best_predictor > predictor)
916 struct edge_prediction *pred2;
917 int prob = probability;
919 for (pred2 = (struct edge_prediction *) *preds;
920 pred2; pred2 = pred2->ep_next)
921 if (pred2 != pred && pred2->ep_predictor == pred->ep_predictor)
int probability2 = pred2->ep_probability;
925 if (pred2->ep_edge != first)
926 probability2 = REG_BR_PROB_BASE - probability2;
928 if ((probability < REG_BR_PROB_BASE / 2) !=
929 (probability2 < REG_BR_PROB_BASE / 2))
930 break;
932 /* If the same predictor later gave better result, go for it! */
933 if ((probability >= REG_BR_PROB_BASE / 2 && (probability2 > probability))
934 || (probability <= REG_BR_PROB_BASE / 2 && (probability2 < probability)))
935 prob = probability2;
937 if (!pred2)
938 best_probability = prob, best_predictor = predictor;
941 d = (combined_probability * probability
942 + (REG_BR_PROB_BASE - combined_probability)
943 * (REG_BR_PROB_BASE - probability));
945 /* Use FP math to avoid overflows of 32bit integers. */
946 if (d == 0)
947 /* If one probability is 0% and one 100%, avoid division by zero. */
948 combined_probability = REG_BR_PROB_BASE / 2;
949 else
950 combined_probability = (((double) combined_probability)
951 * probability
952 * REG_BR_PROB_BASE / d + 0.5);
/* Decide which heuristic to use.  If we didn't match anything, use the
   no_prediction heuristic; if we did match, use either first match or
   Dempster-Shafer theory depending on the flags.  */
960 if (predictor_info [best_predictor].flags & PRED_FLAG_FIRST_MATCH)
961 first_match = true;
963 if (!found)
964 dump_prediction (dump_file, PRED_NO_PREDICTION, combined_probability, bb, true);
965 else
967 dump_prediction (dump_file, PRED_DS_THEORY, combined_probability, bb,
968 !first_match);
969 dump_prediction (dump_file, PRED_FIRST_MATCH, best_probability, bb,
970 first_match);
973 if (first_match)
974 combined_probability = best_probability;
975 dump_prediction (dump_file, PRED_COMBINED, combined_probability, bb, true);
977 if (preds)
979 for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
981 enum br_predictor predictor = pred->ep_predictor;
982 int probability = pred->ep_probability;
984 if (pred->ep_edge != EDGE_SUCC (bb, 0))
985 probability = REG_BR_PROB_BASE - probability;
986 dump_prediction (dump_file, predictor, probability, bb,
987 !first_match || best_predictor == predictor);
990 clear_bb_predictions (bb);
992 if (!bb->count)
994 first->probability = combined_probability;
995 second->probability = REG_BR_PROB_BASE - combined_probability;
/* Check if T1 and T2 satisfy the IV_COMPARE condition.
   Return the SSA_NAME if the condition is satisfied, NULL otherwise.

   T1 and T2 should be one of the following cases:
     1. T1 is SSA_NAME, T2 is NULL
     2. T1 is SSA_NAME, T2 is INTEGER_CST between [-4, 4]
     3. T2 is SSA_NAME, T1 is INTEGER_CST between [-4, 4]  */
1007 static tree
1008 strips_small_constant (tree t1, tree t2)
1010 tree ret = NULL;
1011 int value = 0;
1013 if (!t1)
1014 return NULL;
1015 else if (TREE_CODE (t1) == SSA_NAME)
1016 ret = t1;
1017 else if (tree_fits_shwi_p (t1))
1018 value = tree_to_shwi (t1);
1019 else
1020 return NULL;
1022 if (!t2)
1023 return ret;
1024 else if (tree_fits_shwi_p (t2))
1025 value = tree_to_shwi (t2);
1026 else if (TREE_CODE (t2) == SSA_NAME)
1028 if (ret)
1029 return NULL;
1030 else
1031 ret = t2;
1034 if (value <= 4 && value >= -4)
1035 return ret;
1036 else
1037 return NULL;
1040 /* Return the SSA_NAME in T or T's operands.
1041 Return NULL if SSA_NAME cannot be found. */
1043 static tree
1044 get_base_value (tree t)
1046 if (TREE_CODE (t) == SSA_NAME)
1047 return t;
1049 if (!BINARY_CLASS_P (t))
1050 return NULL;
1052 switch (TREE_OPERAND_LENGTH (t))
1054 case 1:
1055 return strips_small_constant (TREE_OPERAND (t, 0), NULL);
1056 case 2:
1057 return strips_small_constant (TREE_OPERAND (t, 0),
1058 TREE_OPERAND (t, 1));
1059 default:
1060 return NULL;
/* Check the compare STMT in LOOP.  If it compares an induction
   variable to a loop invariant, return true, and save
   LOOP_INVARIANT, COMPARE_CODE and LOOP_STEP.
   Otherwise return false and set LOOP_INVARIANT to NULL.  */
1069 static bool
1070 is_comparison_with_loop_invariant_p (gimple stmt, struct loop *loop,
1071 tree *loop_invariant,
1072 enum tree_code *compare_code,
1073 tree *loop_step,
1074 tree *loop_iv_base)
1076 tree op0, op1, bound, base;
1077 affine_iv iv0, iv1;
1078 enum tree_code code;
1079 tree step;
1081 code = gimple_cond_code (stmt);
1082 *loop_invariant = NULL;
1084 switch (code)
1086 case GT_EXPR:
1087 case GE_EXPR:
1088 case NE_EXPR:
1089 case LT_EXPR:
1090 case LE_EXPR:
1091 case EQ_EXPR:
1092 break;
1094 default:
1095 return false;
1098 op0 = gimple_cond_lhs (stmt);
1099 op1 = gimple_cond_rhs (stmt);
1101 if ((TREE_CODE (op0) != SSA_NAME && TREE_CODE (op0) != INTEGER_CST)
1102 || (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op1) != INTEGER_CST))
1103 return false;
1104 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, true))
1105 return false;
1106 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, true))
1107 return false;
1108 if (TREE_CODE (iv0.step) != INTEGER_CST
1109 || TREE_CODE (iv1.step) != INTEGER_CST)
1110 return false;
1111 if ((integer_zerop (iv0.step) && integer_zerop (iv1.step))
1112 || (!integer_zerop (iv0.step) && !integer_zerop (iv1.step)))
1113 return false;
1115 if (integer_zerop (iv0.step))
1117 if (code != NE_EXPR && code != EQ_EXPR)
1118 code = invert_tree_comparison (code, false);
1119 bound = iv0.base;
1120 base = iv1.base;
1121 if (tree_fits_shwi_p (iv1.step))
1122 step = iv1.step;
1123 else
1124 return false;
1126 else
1128 bound = iv1.base;
1129 base = iv0.base;
1130 if (tree_fits_shwi_p (iv0.step))
1131 step = iv0.step;
1132 else
1133 return false;
1136 if (TREE_CODE (bound) != INTEGER_CST)
1137 bound = get_base_value (bound);
1138 if (!bound)
1139 return false;
1140 if (TREE_CODE (base) != INTEGER_CST)
1141 base = get_base_value (base);
1142 if (!base)
1143 return false;
1145 *loop_invariant = bound;
1146 *compare_code = code;
1147 *loop_step = step;
1148 *loop_iv_base = base;
1149 return true;
1152 /* Compare two SSA_NAMEs: returns TRUE if T1 and T2 are value coherent. */
1154 static bool
1155 expr_coherent_p (tree t1, tree t2)
1157 gimple stmt;
1158 tree ssa_name_1 = NULL;
1159 tree ssa_name_2 = NULL;
1161 gcc_assert (TREE_CODE (t1) == SSA_NAME || TREE_CODE (t1) == INTEGER_CST);
1162 gcc_assert (TREE_CODE (t2) == SSA_NAME || TREE_CODE (t2) == INTEGER_CST);
1164 if (t1 == t2)
1165 return true;
1167 if (TREE_CODE (t1) == INTEGER_CST && TREE_CODE (t2) == INTEGER_CST)
1168 return true;
1169 if (TREE_CODE (t1) == INTEGER_CST || TREE_CODE (t2) == INTEGER_CST)
1170 return false;
1172 /* Check to see if t1 is expressed/defined with t2. */
1173 stmt = SSA_NAME_DEF_STMT (t1);
1174 gcc_assert (stmt != NULL);
1175 if (is_gimple_assign (stmt))
1177 ssa_name_1 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1178 if (ssa_name_1 && ssa_name_1 == t2)
1179 return true;
1182 /* Check to see if t2 is expressed/defined with t1. */
1183 stmt = SSA_NAME_DEF_STMT (t2);
1184 gcc_assert (stmt != NULL);
1185 if (is_gimple_assign (stmt))
1187 ssa_name_2 = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1188 if (ssa_name_2 && ssa_name_2 == t1)
1189 return true;
1192 /* Compare if t1 and t2's def_stmts are identical. */
1193 if (ssa_name_2 != NULL && ssa_name_1 == ssa_name_2)
1194 return true;
1195 else
1196 return false;
1199 /* Predict branch probability of BB when BB contains a branch that compares
1200 an induction variable in LOOP with LOOP_IV_BASE_VAR to LOOP_BOUND_VAR. The
1201 loop exit is compared using LOOP_BOUND_CODE, with step of LOOP_BOUND_STEP.
1203 E.g.
1204 for (int i = 0; i < bound; i++) {
1205 if (i < bound - 2)
1206 computation_1();
1207 else
1208 computation_2();
1211 In this loop, we will predict the branch inside the loop to be taken. */
1213 static void
1214 predict_iv_comparison (struct loop *loop, basic_block bb,
1215 tree loop_bound_var,
1216 tree loop_iv_base_var,
1217 enum tree_code loop_bound_code,
1218 int loop_bound_step)
1220 gimple stmt;
1221 tree compare_var, compare_base;
1222 enum tree_code compare_code;
1223 tree compare_step_var;
1224 edge then_edge;
1225 edge_iterator ei;
1227 if (predicted_by_p (bb, PRED_LOOP_ITERATIONS_GUESSED)
1228 || predicted_by_p (bb, PRED_LOOP_ITERATIONS)
1229 || predicted_by_p (bb, PRED_LOOP_EXIT))
1230 return;
1232 stmt = last_stmt (bb);
1233 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1234 return;
1235 if (!is_comparison_with_loop_invariant_p (stmt, loop, &compare_var,
1236 &compare_code,
1237 &compare_step_var,
1238 &compare_base))
1239 return;
1241 /* Find the taken edge. */
1242 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1243 if (then_edge->flags & EDGE_TRUE_VALUE)
1244 break;
1246 /* When comparing an IV to a loop invariant, NE is more likely to be
1247 taken while EQ is more likely to be not-taken. */
1248 if (compare_code == NE_EXPR)
1250 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1251 return;
1253 else if (compare_code == EQ_EXPR)
1255 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1256 return;
1259 if (!expr_coherent_p (loop_iv_base_var, compare_base))
1260 return;
1262 /* If loop bound, base and compare bound are all constants, we can
1263 calculate the probability directly. */
1264 if (tree_fits_shwi_p (loop_bound_var)
1265 && tree_fits_shwi_p (compare_var)
1266 && tree_fits_shwi_p (compare_base))
1268 int probability;
1269 bool overflow, overall_overflow = false;
1270 widest_int compare_count, tem;
1272 /* (loop_bound - base) / compare_step */
1273 tem = wi::sub (wi::to_widest (loop_bound_var),
1274 wi::to_widest (compare_base), SIGNED, &overflow);
1275 overall_overflow |= overflow;
1276 widest_int loop_count = wi::div_trunc (tem,
1277 wi::to_widest (compare_step_var),
1278 SIGNED, &overflow);
1279 overall_overflow |= overflow;
1281 if (!wi::neg_p (wi::to_widest (compare_step_var))
1282 ^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
1284 /* (loop_bound - compare_bound) / compare_step */
1285 tem = wi::sub (wi::to_widest (loop_bound_var),
1286 wi::to_widest (compare_var), SIGNED, &overflow);
1287 overall_overflow |= overflow;
1288 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1289 SIGNED, &overflow);
1290 overall_overflow |= overflow;
1292 else
1294 /* (compare_bound - base) / compare_step */
1295 tem = wi::sub (wi::to_widest (compare_var),
1296 wi::to_widest (compare_base), SIGNED, &overflow);
1297 overall_overflow |= overflow;
1298 compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
1299 SIGNED, &overflow);
1300 overall_overflow |= overflow;
1302 if (compare_code == LE_EXPR || compare_code == GE_EXPR)
1303 ++compare_count;
1304 if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
1305 ++loop_count;
1306 if (wi::neg_p (compare_count))
1307 compare_count = 0;
1308 if (wi::neg_p (loop_count))
1309 loop_count = 0;
1310 if (loop_count == 0)
1311 probability = 0;
1312 else if (wi::cmps (compare_count, loop_count) == 1)
1313 probability = REG_BR_PROB_BASE;
1314 else
1316 tem = compare_count * REG_BR_PROB_BASE;
1317 tem = wi::udiv_trunc (tem, loop_count);
1318 probability = tem.to_uhwi ();
1321 if (!overall_overflow)
1322 predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
1324 return;
1327 if (expr_coherent_p (loop_bound_var, compare_var))
1329 if ((loop_bound_code == LT_EXPR || loop_bound_code == LE_EXPR)
1330 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1331 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1332 else if ((loop_bound_code == GT_EXPR || loop_bound_code == GE_EXPR)
1333 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1334 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1335 else if (loop_bound_code == NE_EXPR)
1337 /* If the loop backedge condition is "(i != bound)", we do
1338 the comparison based on the step of IV:
1339 * step < 0 : backedge condition is like (i > bound)
1340 * step > 0 : backedge condition is like (i < bound) */
1341 gcc_assert (loop_bound_step != 0);
1342 if (loop_bound_step > 0
1343 && (compare_code == LT_EXPR
1344 || compare_code == LE_EXPR))
1345 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1346 else if (loop_bound_step < 0
1347 && (compare_code == GT_EXPR
1348 || compare_code == GE_EXPR))
1349 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1350 else
1351 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1353 else
/* The branch is predicted not-taken if loop_bound_code is
   the opposite of compare_code.  */
1356 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
1358 else if (expr_coherent_p (loop_iv_base_var, compare_var))
1360 /* For cases like:
1361 for (i = s; i < h; i++)
1362 if (i > s + 2) ....
1363 The branch should be predicted taken. */
1364 if (loop_bound_step > 0
1365 && (compare_code == GT_EXPR || compare_code == GE_EXPR))
1366 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1367 else if (loop_bound_step < 0
1368 && (compare_code == LT_EXPR || compare_code == LE_EXPR))
1369 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, TAKEN);
1370 else
1371 predict_edge_def (then_edge, PRED_LOOP_IV_COMPARE_GUESS, NOT_TAKEN);
/* Predict for extra loop exits that will lead to EXIT_EDGE.  The extra loop
   exits result from short-circuit conditions that will generate an
   if_tmp.  E.g.:
1379 if (foo() || global > 10)
1380 break;
1382 This will be translated into:
1384 BB3:
1385 loop header...
1386 BB4:
1387 if foo() goto BB6 else goto BB5
1388 BB5:
1389 if global > 10 goto BB6 else goto BB7
1390 BB6:
1391 goto BB7
1392 BB7:
1393 iftmp = (PHI 0(BB5), 1(BB6))
1394 if iftmp == 1 goto BB8 else goto BB3
1395 BB8:
1396 outside of the loop...
1398 The edge BB7->BB8 is loop exit because BB8 is outside of the loop.
1399 From the dataflow, we can infer that BB4->BB6 and BB5->BB6 are also loop
1400 exits. This function takes BB7->BB8 as input, and finds out the extra loop
1401 exits to predict them using PRED_LOOP_EXIT. */
1403 static void
1404 predict_extra_loop_exits (edge exit_edge)
1406 unsigned i;
1407 bool check_value_one;
1408 gimple phi_stmt;
1409 tree cmp_rhs, cmp_lhs;
1410 gimple cmp_stmt = last_stmt (exit_edge->src);
1412 if (!cmp_stmt || gimple_code (cmp_stmt) != GIMPLE_COND)
1413 return;
1414 cmp_rhs = gimple_cond_rhs (cmp_stmt);
1415 cmp_lhs = gimple_cond_lhs (cmp_stmt);
1416 if (!TREE_CONSTANT (cmp_rhs)
1417 || !(integer_zerop (cmp_rhs) || integer_onep (cmp_rhs)))
1418 return;
1419 if (TREE_CODE (cmp_lhs) != SSA_NAME)
1420 return;
1422 /* If check_value_one is true, only the phi_args with value '1' will lead
1423 to loop exit. Otherwise, only the phi_args with value '0' will lead to
1424 loop exit. */
1425 check_value_one = (((integer_onep (cmp_rhs))
1426 ^ (gimple_cond_code (cmp_stmt) == EQ_EXPR))
1427 ^ ((exit_edge->flags & EDGE_TRUE_VALUE) != 0));
1429 phi_stmt = SSA_NAME_DEF_STMT (cmp_lhs);
1430 if (!phi_stmt || gimple_code (phi_stmt) != GIMPLE_PHI)
1431 return;
1433 for (i = 0; i < gimple_phi_num_args (phi_stmt); i++)
1435 edge e1;
1436 edge_iterator ei;
1437 tree val = gimple_phi_arg_def (phi_stmt, i);
1438 edge e = gimple_phi_arg_edge (phi_stmt, i);
1440 if (!TREE_CONSTANT (val) || !(integer_zerop (val) || integer_onep (val)))
1441 continue;
1442 if ((check_value_one ^ integer_onep (val)) == 1)
1443 continue;
1444 if (EDGE_COUNT (e->src->succs) != 1)
1446 predict_paths_leading_to_edge (e, PRED_LOOP_EXIT, NOT_TAKEN);
1447 continue;
1450 FOR_EACH_EDGE (e1, ei, e->src->preds)
1451 predict_paths_leading_to_edge (e1, PRED_LOOP_EXIT, NOT_TAKEN);
1455 /* Predict edge probabilities by exploiting loop structure. */
1457 static void
1458 predict_loops (void)
1460 struct loop *loop;
1462 /* Try to predict out blocks in a loop that are not part of a
1463 natural loop. */
1464 FOR_EACH_LOOP (loop, 0)
1466 basic_block bb, *bbs;
1467 unsigned j, n_exits;
1468 vec<edge> exits;
1469 struct tree_niter_desc niter_desc;
1470 edge ex;
1471 struct nb_iter_bound *nb_iter;
1472 enum tree_code loop_bound_code = ERROR_MARK;
1473 tree loop_bound_step = NULL;
1474 tree loop_bound_var = NULL;
1475 tree loop_iv_base = NULL;
1476 gimple stmt = NULL;
1478 exits = get_loop_exit_edges (loop);
1479 n_exits = exits.length ();
1480 if (!n_exits)
1482 exits.release ();
1483 continue;
1486 FOR_EACH_VEC_ELT (exits, j, ex)
1488 tree niter = NULL;
1489 HOST_WIDE_INT nitercst;
1490 int max = PARAM_VALUE (PARAM_MAX_PREDICTED_ITERATIONS);
1491 int probability;
1492 enum br_predictor predictor;
1494 predict_extra_loop_exits (ex);
1496 if (number_of_iterations_exit (loop, ex, &niter_desc, false, false))
1497 niter = niter_desc.niter;
1498 if (!niter || TREE_CODE (niter_desc.niter) != INTEGER_CST)
1499 niter = loop_niter_by_eval (loop, ex);
1501 if (TREE_CODE (niter) == INTEGER_CST)
1503 if (tree_fits_uhwi_p (niter)
1504 && max
1505 && compare_tree_int (niter, max - 1) == -1)
1506 nitercst = tree_to_uhwi (niter) + 1;
1507 else
1508 nitercst = max;
1509 predictor = PRED_LOOP_ITERATIONS;
1511 /* If we have just one exit and we can derive some information about
1512 the number of iterations of the loop from the statements inside
1513 the loop, use it to predict this exit. */
1514 else if (n_exits == 1)
1516 nitercst = estimated_stmt_executions_int (loop);
1517 if (nitercst < 0)
1518 continue;
1519 if (nitercst > max)
1520 nitercst = max;
1522 predictor = PRED_LOOP_ITERATIONS_GUESSED;
1524 else
1525 continue;
1527 /* If the prediction for number of iterations is zero, do not
1528 predict the exit edges. */
1529 if (nitercst == 0)
1530 continue;
1532 probability = ((REG_BR_PROB_BASE + nitercst / 2) / nitercst);
1533 predict_edge (ex, predictor, probability);
1535 exits.release ();
1537 /* Find information about loop bound variables. */
1538 for (nb_iter = loop->bounds; nb_iter;
1539 nb_iter = nb_iter->next)
1540 if (nb_iter->stmt
1541 && gimple_code (nb_iter->stmt) == GIMPLE_COND)
1543 stmt = nb_iter->stmt;
1544 break;
1546 if (!stmt && last_stmt (loop->header)
1547 && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
1548 stmt = last_stmt (loop->header);
1549 if (stmt)
1550 is_comparison_with_loop_invariant_p (stmt, loop,
1551 &loop_bound_var,
1552 &loop_bound_code,
1553 &loop_bound_step,
1554 &loop_iv_base);
1556 bbs = get_loop_body (loop);
1558 for (j = 0; j < loop->num_nodes; j++)
1560 int header_found = 0;
1561 edge e;
1562 edge_iterator ei;
1564 bb = bbs[j];
/* Bypass loop heuristics on continue statements.  These
   statements construct loops via "non-loop" constructs
   in the source language and are better handled
   separately.  */
1570 if (predicted_by_p (bb, PRED_CONTINUE))
1571 continue;
1573 /* Loop branch heuristics - predict an edge back to a
1574 loop's head as taken. */
1575 if (bb == loop->latch)
1577 e = find_edge (loop->latch, loop->header);
1578 if (e)
1580 header_found = 1;
1581 predict_edge_def (e, PRED_LOOP_BRANCH, TAKEN);
/* Loop exit heuristics - predict as not taken an edge exiting the loop
   if the conditional has no loop header successors.  */
1587 if (!header_found
1588 /* If we already used more reliable loop exit predictors, do not
1589 bother with PRED_LOOP_EXIT. */
1590 && !predicted_by_p (bb, PRED_LOOP_ITERATIONS_GUESSED)
1591 && !predicted_by_p (bb, PRED_LOOP_ITERATIONS))
/* For a loop with many exits we don't want to predict all of them
   with a pretty large probability, because if all exits are
   considered in a row, the loop would be predicted to iterate
   almost never.  The code dividing the probability by the number of
   exits is very rough.  It should compute the number of exits
   taken in each path through the function (not the overall number
   of exits, which might be a lot higher for loops with wide switch
   statements in them) and compute the n-th square root.

   We limit the minimal probability to 2% to keep
   EDGE_PROBABILITY_RELIABLE from trusting the branch prediction,
   as this was causing a regression in the perl benchmark containing such
   a wide loop.  */
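/* For instance, if the PRED_LOOP_EXIT hitrate were 9000, a loop with four
   exits would get each exit edge predicted with (10000 - 9000) / 4 == 250,
   i.e. 2.5%, just above the 2% floor.  */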
1607 int probability = ((REG_BR_PROB_BASE
1608 - predictor_info [(int) PRED_LOOP_EXIT].hitrate)
1609 / n_exits);
1610 if (probability < HITRATE (2))
1611 probability = HITRATE (2);
1612 FOR_EACH_EDGE (e, ei, bb->succs)
1613 if (e->dest->index < NUM_FIXED_BLOCKS
1614 || !flow_bb_inside_loop_p (loop, e->dest))
1615 predict_edge (e, PRED_LOOP_EXIT, probability);
1617 if (loop_bound_var)
1618 predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base,
1619 loop_bound_code,
1620 tree_to_shwi (loop_bound_step));
1623 /* Free basic blocks from get_loop_body. */
1624 free (bbs);
1628 /* Attempt to predict probabilities of BB outgoing edges using local
1629 properties. */
1630 static void
1631 bb_estimate_probability_locally (basic_block bb)
1633 rtx_insn *last_insn = BB_END (bb);
1634 rtx cond;
1636 if (! can_predict_insn_p (last_insn))
1637 return;
1638 cond = get_condition (last_insn, NULL, false, false);
1639 if (! cond)
1640 return;
1642 /* Try "pointer heuristic."
1643 A comparison ptr == 0 is predicted as false.
1644 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
1645 if (COMPARISON_P (cond)
1646 && ((REG_P (XEXP (cond, 0)) && REG_POINTER (XEXP (cond, 0)))
1647 || (REG_P (XEXP (cond, 1)) && REG_POINTER (XEXP (cond, 1)))))
1649 if (GET_CODE (cond) == EQ)
1650 predict_insn_def (last_insn, PRED_POINTER, NOT_TAKEN);
1651 else if (GET_CODE (cond) == NE)
1652 predict_insn_def (last_insn, PRED_POINTER, TAKEN);
1654 else
1656 /* Try "opcode heuristic."
1657 EQ tests are usually false and NE tests are usually true. Also,
1658 most quantities are positive, so we can make the appropriate guesses
1659 about signed comparisons against zero. */
1660 switch (GET_CODE (cond))
1662 case CONST_INT:
1663 /* Unconditional branch. */
1664 predict_insn_def (last_insn, PRED_UNCONDITIONAL,
1665 cond == const0_rtx ? NOT_TAKEN : TAKEN);
1666 break;
1668 case EQ:
1669 case UNEQ:
/* Floating point comparisons appear to behave in a very
   unpredictable way because of the special role of = tests in
   FP code.  */
1673 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1675 /* Comparisons with 0 are often used for booleans and there is
1676 nothing useful to predict about them. */
1677 else if (XEXP (cond, 1) == const0_rtx
1678 || XEXP (cond, 0) == const0_rtx)
1680 else
1681 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, NOT_TAKEN);
1682 break;
1684 case NE:
1685 case LTGT:
/* Floating point comparisons appear to behave in a very
   unpredictable way because of the special role of = tests in
   FP code.  */
1689 if (FLOAT_MODE_P (GET_MODE (XEXP (cond, 0))))
1691 /* Comparisons with 0 are often used for booleans and there is
1692 nothing useful to predict about them. */
1693 else if (XEXP (cond, 1) == const0_rtx
1694 || XEXP (cond, 0) == const0_rtx)
1696 else
1697 predict_insn_def (last_insn, PRED_OPCODE_NONEQUAL, TAKEN);
1698 break;
1700 case ORDERED:
1701 predict_insn_def (last_insn, PRED_FPOPCODE, TAKEN);
1702 break;
1704 case UNORDERED:
1705 predict_insn_def (last_insn, PRED_FPOPCODE, NOT_TAKEN);
1706 break;
1708 case LE:
1709 case LT:
1710 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
1711 || XEXP (cond, 1) == constm1_rtx)
1712 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, NOT_TAKEN);
1713 break;
1715 case GE:
1716 case GT:
1717 if (XEXP (cond, 1) == const0_rtx || XEXP (cond, 1) == const1_rtx
1718 || XEXP (cond, 1) == constm1_rtx)
1719 predict_insn_def (last_insn, PRED_OPCODE_POSITIVE, TAKEN);
1720 break;
1722 default:
1723 break;
1727 /* Set edge->probability for each successor edge of BB. */
1728 void
1729 guess_outgoing_edge_probabilities (basic_block bb)
1731 bb_estimate_probability_locally (bb);
1732 combine_predictions_for_insn (BB_END (bb), bb);
1735 static tree expr_expected_value (tree, bitmap, enum br_predictor *predictor);
1737 /* Helper function for expr_expected_value. */
1739 static tree
1740 expr_expected_value_1 (tree type, tree op0, enum tree_code code,
1741 tree op1, bitmap visited, enum br_predictor *predictor)
1743 gimple def;
1745 if (predictor)
1746 *predictor = PRED_UNCONDITIONAL;
1748 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1750 if (TREE_CONSTANT (op0))
1751 return op0;
1753 if (code != SSA_NAME)
1754 return NULL_TREE;
1756 def = SSA_NAME_DEF_STMT (op0);
1758 /* If we were already here, break the infinite cycle. */
1759 if (!bitmap_set_bit (visited, SSA_NAME_VERSION (op0)))
1760 return NULL;
1762 if (gimple_code (def) == GIMPLE_PHI)
/* All the arguments of the PHI node must have the same expected
   constant value.  */
1766 int i, n = gimple_phi_num_args (def);
1767 tree val = NULL, new_val;
1769 for (i = 0; i < n; i++)
1771 tree arg = PHI_ARG_DEF (def, i);
1772 enum br_predictor predictor2;
/* If this PHI has itself as an argument, we cannot
   determine the expected value of this argument.  However,
   if we can find an expected constant value for the other
   PHI args then we can still be sure that this is
   likely a constant.  So be optimistic and just
   continue with the next argument.  */
1780 if (arg == PHI_RESULT (def))
1781 continue;
1783 new_val = expr_expected_value (arg, visited, &predictor2);
/* It is difficult to combine value predictors.  Simply assume
   that the later predictor is weaker and take its prediction.  */
1787 if (predictor && *predictor < predictor2)
1788 *predictor = predictor2;
1789 if (!new_val)
1790 return NULL;
1791 if (!val)
1792 val = new_val;
1793 else if (!operand_equal_p (val, new_val, false))
1794 return NULL;
1796 return val;
1798 if (is_gimple_assign (def))
1800 if (gimple_assign_lhs (def) != op0)
1801 return NULL;
1803 return expr_expected_value_1 (TREE_TYPE (gimple_assign_lhs (def)),
1804 gimple_assign_rhs1 (def),
1805 gimple_assign_rhs_code (def),
1806 gimple_assign_rhs2 (def),
1807 visited, predictor);
1810 if (is_gimple_call (def))
1812 tree decl = gimple_call_fndecl (def);
1813 if (!decl)
1815 if (gimple_call_internal_p (def)
1816 && gimple_call_internal_fn (def) == IFN_BUILTIN_EXPECT)
1818 gcc_assert (gimple_call_num_args (def) == 3);
1819 tree val = gimple_call_arg (def, 0);
1820 if (TREE_CONSTANT (val))
1821 return val;
1822 if (predictor)
1824 tree val2 = gimple_call_arg (def, 2);
1825 gcc_assert (TREE_CODE (val2) == INTEGER_CST
1826 && tree_fits_uhwi_p (val2)
1827 && tree_to_uhwi (val2) < END_PREDICTORS);
1828 *predictor = (enum br_predictor) tree_to_uhwi (val2);
1830 return gimple_call_arg (def, 1);
1832 return NULL;
1834 if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
1835 switch (DECL_FUNCTION_CODE (decl))
1837 case BUILT_IN_EXPECT:
1839 tree val;
1840 if (gimple_call_num_args (def) != 2)
1841 return NULL;
1842 val = gimple_call_arg (def, 0);
1843 if (TREE_CONSTANT (val))
1844 return val;
1845 if (predictor)
1846 *predictor = PRED_BUILTIN_EXPECT;
1847 return gimple_call_arg (def, 1);
1850 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N:
1851 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1:
1852 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
1853 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
1854 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_8:
1855 case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_16:
1856 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE:
1857 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N:
1858 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
1859 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
1860 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
1861 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
1862 case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
1863 /* Assume that any given atomic operation has low contention,
1864 and thus the compare-and-swap operation succeeds. */
1865 if (predictor)
1866 *predictor = PRED_COMPARE_AND_SWAP;
1867 return boolean_true_node;
1868 default:
1869 break;
1873 return NULL;
1876 if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
1878 tree res;
1879 enum br_predictor predictor2;
1880 op0 = expr_expected_value (op0, visited, predictor);
1881 if (!op0)
1882 return NULL;
1883 op1 = expr_expected_value (op1, visited, &predictor2);
1884 if (predictor && *predictor < predictor2)
1885 *predictor = predictor2;
1886 if (!op1)
1887 return NULL;
1888 res = fold_build2 (code, type, op0, op1);
1889 if (TREE_CONSTANT (res))
1890 return res;
1891 return NULL;
1893 if (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS)
1895 tree res;
1896 op0 = expr_expected_value (op0, visited, predictor);
1897 if (!op0)
1898 return NULL;
1899 res = fold_build1 (code, type, op0);
1900 if (TREE_CONSTANT (res))
1901 return res;
1902 return NULL;
1904 return NULL;
/* Return the constant EXPR will likely have at execution time, or NULL if
   unknown.  The function is used by the builtin_expect branch predictor, so
   the evidence must come from this construct and additional possible
   constant folding.

   We may want to implement a more involved value guess (such as prediction
   based on value range propagation), but such tricks shall go into a new
   implementation.  */
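/* For example, for source code like
     if (__builtin_expect (x == 42, 1))
   the expected value of the condition works out to be nonzero, which
   tree_predict_by_opcode below turns into an edge prediction in favor of
   the then branch.  */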
1915 static tree
1916 expr_expected_value (tree expr, bitmap visited,
1917 enum br_predictor *predictor)
1919 enum tree_code code;
1920 tree op0, op1;
1922 if (TREE_CONSTANT (expr))
1924 if (predictor)
1925 *predictor = PRED_UNCONDITIONAL;
1926 return expr;
1929 extract_ops_from_tree (expr, &code, &op0, &op1);
1930 return expr_expected_value_1 (TREE_TYPE (expr),
1931 op0, code, op1, visited, predictor);
/* Predict using the opcode of the last statement in the basic block.  */
1935 static void
1936 tree_predict_by_opcode (basic_block bb)
1938 gimple stmt = last_stmt (bb);
1939 edge then_edge;
1940 tree op0, op1;
1941 tree type;
1942 tree val;
1943 enum tree_code cmp;
1944 bitmap visited;
1945 edge_iterator ei;
1946 enum br_predictor predictor;
1948 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1949 return;
1950 FOR_EACH_EDGE (then_edge, ei, bb->succs)
1951 if (then_edge->flags & EDGE_TRUE_VALUE)
1952 break;
1953 op0 = gimple_cond_lhs (stmt);
1954 op1 = gimple_cond_rhs (stmt);
1955 cmp = gimple_cond_code (stmt);
1956 type = TREE_TYPE (op0);
1957 visited = BITMAP_ALLOC (NULL);
1958 val = expr_expected_value_1 (boolean_type_node, op0, cmp, op1, visited,
1959 &predictor);
1960 BITMAP_FREE (visited);
1961 if (val && TREE_CODE (val) == INTEGER_CST)
1963 if (predictor == PRED_BUILTIN_EXPECT)
1965 int percent = PARAM_VALUE (BUILTIN_EXPECT_PROBABILITY);
1967 gcc_assert (percent >= 0 && percent <= 100);
1968 if (integer_zerop (val))
1969 percent = 100 - percent;
1970 predict_edge (then_edge, PRED_BUILTIN_EXPECT, HITRATE (percent));
1972 else
1973 predict_edge (then_edge, predictor,
1974 integer_zerop (val) ? NOT_TAKEN : TAKEN);
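/* In the PRED_BUILTIN_EXPECT case above, with the default value of
   BUILTIN_EXPECT_PROBABILITY (90, unless changed with
   --param builtin-expect-probability), the then edge is predicted at
   HITRATE (90), or HITRATE (10) when the expected value is zero.  */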
1976 /* Try "pointer heuristic."
1977 A comparison ptr == 0 is predicted as false.
1978 Similarly, a comparison ptr1 == ptr2 is predicted as false. */
1979 if (POINTER_TYPE_P (type))
1981 if (cmp == EQ_EXPR)
1982 predict_edge_def (then_edge, PRED_TREE_POINTER, NOT_TAKEN);
1983 else if (cmp == NE_EXPR)
1984 predict_edge_def (then_edge, PRED_TREE_POINTER, TAKEN);
1986 else
1988 /* Try "opcode heuristic."
1989 EQ tests are usually false and NE tests are usually true. Also,
1990 most quantities are positive, so we can make the appropriate guesses
1991 about signed comparisons against zero. */
1992 switch (cmp)
1994 case EQ_EXPR:
1995 case UNEQ_EXPR:
/* Floating point comparisons appear to behave in a very
   unpredictable way because of the special role of = tests in
   FP code.  */
1999 if (FLOAT_TYPE_P (type))
2001 /* Comparisons with 0 are often used for booleans and there is
2002 nothing useful to predict about them. */
2003 else if (integer_zerop (op0) || integer_zerop (op1))
2005 else
2006 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, NOT_TAKEN);
2007 break;
2009 case NE_EXPR:
2010 case LTGT_EXPR:
/* Floating point comparisons appear to behave in a very
   unpredictable way because of the special role of = tests in
   FP code.  */
2014 if (FLOAT_TYPE_P (type))
2016 /* Comparisons with 0 are often used for booleans and there is
2017 nothing useful to predict about them. */
2018 else if (integer_zerop (op0)
2019 || integer_zerop (op1))
2021 else
2022 predict_edge_def (then_edge, PRED_TREE_OPCODE_NONEQUAL, TAKEN);
2023 break;
2025 case ORDERED_EXPR:
2026 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, TAKEN);
2027 break;
2029 case UNORDERED_EXPR:
2030 predict_edge_def (then_edge, PRED_TREE_FPOPCODE, NOT_TAKEN);
2031 break;
2033 case LE_EXPR:
2034 case LT_EXPR:
2035 if (integer_zerop (op1)
2036 || integer_onep (op1)
2037 || integer_all_onesp (op1)
2038 || real_zerop (op1)
2039 || real_onep (op1)
2040 || real_minus_onep (op1))
2041 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, NOT_TAKEN);
2042 break;
2044 case GE_EXPR:
2045 case GT_EXPR:
2046 if (integer_zerop (op1)
2047 || integer_onep (op1)
2048 || integer_all_onesp (op1)
2049 || real_zerop (op1)
2050 || real_onep (op1)
2051 || real_minus_onep (op1))
2052 predict_edge_def (then_edge, PRED_TREE_OPCODE_POSITIVE, TAKEN);
2053 break;
2055 default:
2056 break;
2060 /* Try to guess whether the return value indicates an error code. */
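/* Illustrative examples (not from the sources):

	return NULL;   PRED_NULL_RETURN, predicted unlikely
	return -1;     PRED_NEGATIVE_RETURN, predicted unlikely
	return 42;     PRED_CONST_RETURN, predicted likely
	return 0;      no prediction; 0/1 often represent booleans  */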
2062 static enum br_predictor
2063 return_prediction (tree val, enum prediction *prediction)
2065 /* VOID. */
2066 if (!val)
2067 return PRED_NO_PREDICTION;
2068 /* Different heuristics for pointers and scalars. */
2069 if (POINTER_TYPE_P (TREE_TYPE (val)))
2071 /* NULL is usually not returned. */
2072 if (integer_zerop (val))
2074 *prediction = NOT_TAKEN;
2075 return PRED_NULL_RETURN;
2078 else if (INTEGRAL_TYPE_P (TREE_TYPE (val)))
2080 /* Negative return values are often used to indicate
2081 errors. */
2082 if (TREE_CODE (val) == INTEGER_CST
2083 && tree_int_cst_sgn (val) < 0)
2085 *prediction = NOT_TAKEN;
2086 return PRED_NEGATIVE_RETURN;
2088 /* Constant return values seem to be commonly taken.
2089 Zero/one often represent booleans, so exclude them from the
2090 heuristics. */
2091 if (TREE_CONSTANT (val)
2092 && (!integer_zerop (val) && !integer_onep (val)))
2094 *prediction = TAKEN;
2095 return PRED_CONST_RETURN;
2098 return PRED_NO_PREDICTION;
2101 /* Find the basic block with the return expression and look up the possible
2102 return value, trying to apply the RETURN_PREDICTION heuristics. */
2103 static void
2104 apply_return_prediction (void)
2106 gimple return_stmt = NULL;
2107 tree return_val;
2108 edge e;
2109 gimple phi;
2110 int phi_num_args, i;
2111 enum br_predictor pred;
2112 enum prediction direction;
2113 edge_iterator ei;
2115 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2117 return_stmt = last_stmt (e->src);
2118 if (return_stmt
2119 && gimple_code (return_stmt) == GIMPLE_RETURN)
2120 break;
2122 if (!e)
2123 return;
2124 return_val = gimple_return_retval (return_stmt);
2125 if (!return_val)
2126 return;
2127 if (TREE_CODE (return_val) != SSA_NAME
2128 || !SSA_NAME_DEF_STMT (return_val)
2129 || gimple_code (SSA_NAME_DEF_STMT (return_val)) != GIMPLE_PHI)
2130 return;
2131 phi = SSA_NAME_DEF_STMT (return_val);
2132 phi_num_args = gimple_phi_num_args (phi);
2133 pred = return_prediction (PHI_ARG_DEF (phi, 0), &direction);
2135 /* Avoid the degenerate case where all return values from the function
2136 belong to the same category (i.e. they are all positive constants),
2137 so we can hardly say anything about them. */
2138 for (i = 1; i < phi_num_args; i++)
2139 if (pred != return_prediction (PHI_ARG_DEF (phi, i), &direction))
2140 break;
2141 if (i != phi_num_args)
2142 for (i = 0; i < phi_num_args; i++)
2144 pred = return_prediction (PHI_ARG_DEF (phi, i), &direction);
2145 if (pred != PRED_NO_PREDICTION)
2146 predict_paths_leading_to_edge (gimple_phi_arg_edge (phi, i), pred,
2147 direction);
2151 /* Look for basic blocks that contain unlikely-to-happen events
2152 (such as noreturn calls) and mark all paths leading to execution
2153 of these basic blocks as unlikely. */
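/* Illustrative example (not part of this file): given a declaration like

	extern void fatal_error (const char *) __attribute__ ((noreturn, cold));

   every path that ends up calling fatal_error is predicted as unlikely.  */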
2155 static void
2156 tree_bb_level_predictions (void)
2158 basic_block bb;
2159 bool has_return_edges = false;
2160 edge e;
2161 edge_iterator ei;
2163 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
2164 if (!(e->flags & (EDGE_ABNORMAL | EDGE_FAKE | EDGE_EH)))
2166 has_return_edges = true;
2167 break;
2170 apply_return_prediction ();
2172 FOR_EACH_BB_FN (bb, cfun)
2174 gimple_stmt_iterator gsi;
2176 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2178 gimple stmt = gsi_stmt (gsi);
2179 tree decl;
2181 if (is_gimple_call (stmt))
2183 if ((gimple_call_flags (stmt) & ECF_NORETURN)
2184 && has_return_edges)
2185 predict_paths_leading_to (bb, PRED_NORETURN,
2186 NOT_TAKEN);
2187 decl = gimple_call_fndecl (stmt);
2188 if (decl
2189 && lookup_attribute ("cold",
2190 DECL_ATTRIBUTES (decl)))
2191 predict_paths_leading_to (bb, PRED_COLD_FUNCTION,
2192 NOT_TAKEN);
2194 else if (gimple_code (stmt) == GIMPLE_PREDICT)
2196 predict_paths_leading_to (bb, gimple_predict_predictor (stmt),
2197 gimple_predict_outcome (stmt));
2198 /* Keep GIMPLE_PREDICT around so early inlining will propagate
2199 hints to callers. */
2205 #ifdef ENABLE_CHECKING
2207 /* Callback for hash_map::traverse, asserts that the pointer map is
2208 empty. */
2210 bool
2211 assert_is_empty (const_basic_block const &, edge_prediction *const &value,
2212 void *)
2214 gcc_assert (!value);
2215 return false;
2217 #endif
2219 /* Predict branch probabilities and estimate profile for basic block BB. */
2221 static void
2222 tree_estimate_probability_bb (basic_block bb)
2224 edge e;
2225 edge_iterator ei;
2226 gimple last;
2228 FOR_EACH_EDGE (e, ei, bb->succs)
2230 /* Predict edges to user labels with attributes. */
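	/* Illustrative example (not from the sources): GCC label attributes
	   can drive this heuristic directly:

		if (unlikely_condition)
		  goto error;
		...
	      error: __attribute__ ((cold));
		cleanup ();

	   The edge to the cold label is then predicted not taken.  */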
2231 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2233 gimple_stmt_iterator gi;
2234 for (gi = gsi_start_bb (e->dest); !gsi_end_p (gi); gsi_next (&gi))
2236 gimple stmt = gsi_stmt (gi);
2237 tree decl;
2239 if (gimple_code (stmt) != GIMPLE_LABEL)
2240 break;
2241 decl = gimple_label_label (stmt);
2242 if (DECL_ARTIFICIAL (decl))
2243 continue;
2245 /* Finally, we have a user-defined label. */
2246 if (lookup_attribute ("cold", DECL_ATTRIBUTES (decl)))
2247 predict_edge_def (e, PRED_COLD_LABEL, NOT_TAKEN);
2248 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (decl)))
2249 predict_edge_def (e, PRED_HOT_LABEL, TAKEN);
2253 /* Predict early returns to be probable, as we've already taken
2254 care of error returns, and other cases are often used for
2255 fast paths through the function.
2257 Since we've already removed the return statements, we are
2258 looking for CFG like:
2260 if (conditional)
2263 goto return_block
2265 some other blocks
2266 return_block:
2267 return_stmt. */
2268 if (e->dest != bb->next_bb
2269 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
2270 && single_succ_p (e->dest)
2271 && single_succ_edge (e->dest)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2272 && (last = last_stmt (e->dest)) != NULL
2273 && gimple_code (last) == GIMPLE_RETURN)
2275 edge e1;
2276 edge_iterator ei1;
2278 if (single_succ_p (bb))
2280 FOR_EACH_EDGE (e1, ei1, bb->preds)
2281 if (!predicted_by_p (e1->src, PRED_NULL_RETURN)
2282 && !predicted_by_p (e1->src, PRED_CONST_RETURN)
2283 && !predicted_by_p (e1->src, PRED_NEGATIVE_RETURN))
2284 predict_edge_def (e1, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
2286 else
2287 if (!predicted_by_p (e->src, PRED_NULL_RETURN)
2288 && !predicted_by_p (e->src, PRED_CONST_RETURN)
2289 && !predicted_by_p (e->src, PRED_NEGATIVE_RETURN))
2290 predict_edge_def (e, PRED_TREE_EARLY_RETURN, NOT_TAKEN);
2293 /* Look for a block we are guarding (i.e. we dominate it,
2294 but it doesn't postdominate us). */
2295 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) && e->dest != bb
2296 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src)
2297 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e->dest))
2299 gimple_stmt_iterator bi;
2301 /* The call heuristic claims that a guarded function call
2302 is improbable. This is because such calls are often used
2303 to signal exceptional situations such as printing error
2304 messages. */
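	   /* Illustrative example (not from the sources):

		if (res < 0)
		  error ("operation failed");

	      The guarded call has side effects, so the edge into the call
	      is predicted not taken.  */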
2305 for (bi = gsi_start_bb (e->dest); !gsi_end_p (bi);
2306 gsi_next (&bi))
2308 gimple stmt = gsi_stmt (bi);
2309 if (is_gimple_call (stmt)
2310 /* Constant and pure calls are hardly ever used to signal
2311 something exceptional. */
2312 && gimple_has_side_effects (stmt))
2314 predict_edge_def (e, PRED_CALL, NOT_TAKEN);
2315 break;
2320 tree_predict_by_opcode (bb);
2323 /* Predict branch probabilities and estimate profile of the tree CFG.
2324 This function can be called from the loop optimizers to recompute
2325 the profile information. */
2327 void
2328 tree_estimate_probability (void)
2330 basic_block bb;
2332 add_noreturn_fake_exit_edges ();
2333 connect_infinite_loops_to_exit ();
2334 /* We use loop_niter_by_eval, which requires that the loops have
2335 preheaders. */
2336 create_preheaders (CP_SIMPLE_PREHEADERS);
2337 calculate_dominance_info (CDI_POST_DOMINATORS);
2339 bb_predictions = new hash_map<const_basic_block, edge_prediction *>;
2340 tree_bb_level_predictions ();
2341 record_loop_exits ();
2343 if (number_of_loops (cfun) > 1)
2344 predict_loops ();
2346 FOR_EACH_BB_FN (bb, cfun)
2347 tree_estimate_probability_bb (bb);
2349 FOR_EACH_BB_FN (bb, cfun)
2350 combine_predictions_for_bb (bb);
2352 #ifdef ENABLE_CHECKING
2353 bb_predictions->traverse<void *, assert_is_empty> (NULL);
2354 #endif
2355 delete bb_predictions;
2356 bb_predictions = NULL;
2358 estimate_bb_frequencies (false);
2359 free_dominance_info (CDI_POST_DOMINATORS);
2360 remove_fake_exit_edges ();
2363 /* Predict edges to successors of CUR whose sources are not postdominated by
2364 BB with predictor PRED, and recurse to all postdominators. */
2366 static void
2367 predict_paths_for_bb (basic_block cur, basic_block bb,
2368 enum br_predictor pred,
2369 enum prediction taken,
2370 bitmap visited)
2372 edge e;
2373 edge_iterator ei;
2374 basic_block son;
2376 /* We are looking for all edges forming an edge cut induced by the
2377 set of all blocks postdominated by BB. */
2378 FOR_EACH_EDGE (e, ei, cur->preds)
2379 if (e->src->index >= NUM_FIXED_BLOCKS
2380 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, bb))
2382 edge e2;
2383 edge_iterator ei2;
2384 bool found = false;
2386 /* Ignore fake edges and eh, we predict them as not taken anyway. */
2387 if (e->flags & (EDGE_EH | EDGE_FAKE))
2388 continue;
2389 gcc_assert (bb == cur || dominated_by_p (CDI_POST_DOMINATORS, cur, bb));
2391 /* See if there is an edge from e->src that is not abnormal
2392 and does not lead to BB. */
2393 FOR_EACH_EDGE (e2, ei2, e->src->succs)
2394 if (e2 != e
2395 && !(e2->flags & (EDGE_EH | EDGE_FAKE))
2396 && !dominated_by_p (CDI_POST_DOMINATORS, e2->dest, bb))
2398 found = true;
2399 break;
2402 /* If there is a non-abnormal path leaving e->src, predict the edge
2403 using the predictor. Otherwise we need to look for paths
2404 leading to e->src.
2406 The second case may lead to an infinite loop when we are predicting
2407 regions that are only reachable by abnormal edges. We simply
2408 prevent visiting a given BB twice. */
2409 if (found)
2410 predict_edge_def (e, pred, taken);
2411 else if (bitmap_set_bit (visited, e->src->index))
2412 predict_paths_for_bb (e->src, e->src, pred, taken, visited);
2414 for (son = first_dom_son (CDI_POST_DOMINATORS, cur);
2415 son;
2416 son = next_dom_son (CDI_POST_DOMINATORS, son))
2417 predict_paths_for_bb (son, bb, pred, taken, visited);
2420 /* Set branch probabilities on all paths leading to BB according to
2421 the predictor PRED and prediction TAKEN. */
2423 static void
2424 predict_paths_leading_to (basic_block bb, enum br_predictor pred,
2425 enum prediction taken)
2427 bitmap visited = BITMAP_ALLOC (NULL);
2428 predict_paths_for_bb (bb, bb, pred, taken, visited);
2429 BITMAP_FREE (visited);
2432 /* Like predict_paths_leading_to but take edge instead of basic block. */
2434 static void
2435 predict_paths_leading_to_edge (edge e, enum br_predictor pred,
2436 enum prediction taken)
2438 bool has_nonloop_edge = false;
2439 edge_iterator ei;
2440 edge e2;
2442 basic_block bb = e->src;
2443 FOR_EACH_EDGE (e2, ei, bb->succs)
2444 if (e2->dest != e->src && e2->dest != e->dest
2445 && !(e->flags & (EDGE_EH | EDGE_FAKE))
2446 && !dominated_by_p (CDI_POST_DOMINATORS, e->src, e2->dest))
2448 has_nonloop_edge = true;
2449 break;
2451 if (!has_nonloop_edge)
2453 bitmap visited = BITMAP_ALLOC (NULL);
2454 predict_paths_for_bb (bb, bb, pred, taken, visited);
2455 BITMAP_FREE (visited);
2457 else
2458 predict_edge_def (e, pred, taken);
2461 /* This is used to carry information about basic blocks. It is
2462 attached to the AUX field of the standard CFG block. */
2464 struct block_info
2466 /* Estimated frequency of execution of basic_block. */
2467 sreal frequency;
2469 /* To keep queue of basic blocks to process. */
2470 basic_block next;
2472 /* Number of predecessors we need to visit first. */
2473 int npredecessors;
2476 /* Similar information for edges. */
2477 struct edge_prob_info
2479 /* If the edge is a loopback edge, the probability that the edge will be
2480 reached provided the header is. The estimated number of iterations of
2481 the loop can then be computed as 1 / (1 - back_edge_prob). */
2482 sreal back_edge_prob;
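   /* For example, a back_edge_prob of 0.9 corresponds to an estimated
      1 / (1 - 0.9) = 10 iterations of the loop.  */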
2483 /* True if the edge is a loopback edge in the natural loop. */
2484 unsigned int back_edge:1;
2487 #define BLOCK_INFO(B) ((block_info *) (B)->aux)
2488 #undef EDGE_INFO
2489 #define EDGE_INFO(E) ((edge_prob_info *) (E)->aux)
2491 /* Helper function for estimate_bb_frequencies.
2492 Propagate the frequencies in blocks marked in
2493 TOVISIT, starting in HEAD. */
2495 static void
2496 propagate_freq (basic_block head, bitmap tovisit)
2498 basic_block bb;
2499 basic_block last;
2500 unsigned i;
2501 edge e;
2502 basic_block nextbb;
2503 bitmap_iterator bi;
2505 /* For each basic block we need to visit, count the number of its
2506 predecessors that we need to visit first. */
2507 EXECUTE_IF_SET_IN_BITMAP (tovisit, 0, i, bi)
2509 edge_iterator ei;
2510 int count = 0;
2512 bb = BASIC_BLOCK_FOR_FN (cfun, i);
2514 FOR_EACH_EDGE (e, ei, bb->preds)
2516 bool visit = bitmap_bit_p (tovisit, e->src->index);
2518 if (visit && !(e->flags & EDGE_DFS_BACK))
2519 count++;
2520 else if (visit && dump_file && !EDGE_INFO (e)->back_edge)
2521 fprintf (dump_file,
2522 "Irreducible region hit, ignoring edge to %i->%i\n",
2523 e->src->index, bb->index);
2525 BLOCK_INFO (bb)->npredecessors = count;
2526 /* When the function never returns, we will never process the exit block. */
2527 if (!count && bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2528 bb->count = bb->frequency = 0;
2531 BLOCK_INFO (head)->frequency = real_one;
2532 last = head;
2533 for (bb = head; bb; bb = nextbb)
2535 edge_iterator ei;
2536 sreal cyclic_probability = real_zero;
2537 sreal frequency = real_zero;
2539 nextbb = BLOCK_INFO (bb)->next;
2540 BLOCK_INFO (bb)->next = NULL;
2542 /* Compute frequency of basic block. */
2543 if (bb != head)
2545 #ifdef ENABLE_CHECKING
2546 FOR_EACH_EDGE (e, ei, bb->preds)
2547 gcc_assert (!bitmap_bit_p (tovisit, e->src->index)
2548 || (e->flags & EDGE_DFS_BACK));
2549 #endif
2551 FOR_EACH_EDGE (e, ei, bb->preds)
2552 if (EDGE_INFO (e)->back_edge)
2554 cyclic_probability += EDGE_INFO (e)->back_edge_prob;
2556 else if (!(e->flags & EDGE_DFS_BACK))
2558 /* frequency += (e->probability
2559 * BLOCK_INFO (e->src)->frequency /
2560 REG_BR_PROB_BASE); */
2562 sreal tmp (e->probability, 0);
2563 tmp *= BLOCK_INFO (e->src)->frequency;
2564 tmp *= real_inv_br_prob_base;
2565 frequency += tmp;
2568 if (cyclic_probability == real_zero)
2570 BLOCK_INFO (bb)->frequency = frequency;
2572 else
2574 if (cyclic_probability > real_almost_one)
2575 cyclic_probability = real_almost_one;
2577 /* BLOCK_INFO (bb)->frequency = frequency
2578 / (1 - cyclic_probability) */
2580 cyclic_probability = real_one - cyclic_probability;
2581 BLOCK_INFO (bb)->frequency = frequency / cyclic_probability;
2585 bitmap_clear_bit (tovisit, bb->index);
2587 e = find_edge (bb, head);
2588 if (e)
2590 /* EDGE_INFO (e)->back_edge_prob
2591 = ((e->probability * BLOCK_INFO (bb)->frequency)
2592 / REG_BR_PROB_BASE); */
2594 sreal tmp (e->probability, 0);
2595 tmp *= BLOCK_INFO (bb)->frequency;
2596 EDGE_INFO (e)->back_edge_prob = tmp * real_inv_br_prob_base;
2599 /* Propagate to successor blocks. */
2600 FOR_EACH_EDGE (e, ei, bb->succs)
2601 if (!(e->flags & EDGE_DFS_BACK)
2602 && BLOCK_INFO (e->dest)->npredecessors)
2604 BLOCK_INFO (e->dest)->npredecessors--;
2605 if (!BLOCK_INFO (e->dest)->npredecessors)
2607 if (!nextbb)
2608 nextbb = e->dest;
2609 else
2610 BLOCK_INFO (last)->next = e->dest;
2612 last = e->dest;
2618 /* Estimate frequencies in loops at the same nest level. */
2620 static void
2621 estimate_loops_at_level (struct loop *first_loop)
2623 struct loop *loop;
2625 for (loop = first_loop; loop; loop = loop->next)
2627 edge e;
2628 basic_block *bbs;
2629 unsigned i;
2630 bitmap tovisit = BITMAP_ALLOC (NULL);
2632 estimate_loops_at_level (loop->inner);
2634 /* Find current loop back edge and mark it. */
2635 e = loop_latch_edge (loop);
2636 EDGE_INFO (e)->back_edge = 1;
2638 bbs = get_loop_body (loop);
2639 for (i = 0; i < loop->num_nodes; i++)
2640 bitmap_set_bit (tovisit, bbs[i]->index);
2641 free (bbs);
2642 propagate_freq (loop->header, tovisit);
2643 BITMAP_FREE (tovisit);
2647 /* Propagate frequencies through the structure of loops. */
2649 static void
2650 estimate_loops (void)
2652 bitmap tovisit = BITMAP_ALLOC (NULL);
2653 basic_block bb;
2655 /* Start by estimating the frequencies in the loops. */
2656 if (number_of_loops (cfun) > 1)
2657 estimate_loops_at_level (current_loops->tree_root->inner);
2659 /* Now propagate the frequencies through all the blocks. */
2660 FOR_ALL_BB_FN (bb, cfun)
2662 bitmap_set_bit (tovisit, bb->index);
2664 propagate_freq (ENTRY_BLOCK_PTR_FOR_FN (cfun), tovisit);
2665 BITMAP_FREE (tovisit);
2668 /* Drop the profile for NODE to guessed, and update its frequency based on
2669 whether it is expected to be hot given the CALL_COUNT. */
2671 static void
2672 drop_profile (struct cgraph_node *node, gcov_type call_count)
2674 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
2675 /* In the case where this was called by another function with a
2676 dropped profile, call_count will be 0. Since there are no
2677 non-zero call counts to this function, we don't know for sure
2678 whether it is hot, and therefore it will be marked normal below. */
2679 bool hot = maybe_hot_count_p (NULL, call_count);
2681 if (dump_file)
2682 fprintf (dump_file,
2683 "Dropping 0 profile for %s/%i. %s based on calls.\n",
2684 node->name (), node->order,
2685 hot ? "Function is hot" : "Function is normal");
2686 /* We only expect to miss profiles for functions that are reached
2687 via non-zero call edges in cases where the function may have
2688 been linked from another module or library (COMDATs and extern
2689 templates). See the comments below for handle_missing_profiles.
2690 Also, only warn in cases where the missing counts exceed the
2691 number of training runs. In certain cases with an execv followed
2692 by a no-return call the profile for the no-return call is not
2693 dumped and there can be a mismatch. */
2694 if (!DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl)
2695 && call_count > profile_info->runs)
2697 if (flag_profile_correction)
2699 if (dump_file)
2700 fprintf (dump_file,
2701 "Missing counts for called function %s/%i\n",
2702 node->name (), node->order);
2704 else
2705 warning (0, "Missing counts for called function %s/%i",
2706 node->name (), node->order);
2709 profile_status_for_fn (fn)
2710 = (flag_guess_branch_prob ? PROFILE_GUESSED : PROFILE_ABSENT);
2711 node->frequency
2712 = hot ? NODE_FREQUENCY_HOT : NODE_FREQUENCY_NORMAL;
2715 /* In the case of COMDAT routines, multiple object files will contain the same
2716 function and the linker will select one for the binary. In that case
2717 all the other copies in the profile-instrumented binary will be missing
2718 profile counts. Look for cases where this happened, due to non-zero
2719 call counts going to 0-count functions, and drop the profile to guessed
2720 so that we can use the estimated probabilities and avoid optimizing only
2721 for size.
2723 The other case where the profile may be missing is when the routine
2724 is not going to be emitted to the object file, e.g. for "extern template"
2725 class methods. Those will be marked DECL_EXTERNAL. Emit a warning in
2726 all other cases of non-zero calls to 0-count functions. */
2728 void
2729 handle_missing_profiles (void)
2731 struct cgraph_node *node;
2732 int unlikely_count_fraction = PARAM_VALUE (UNLIKELY_BB_COUNT_FRACTION);
2733 vec<struct cgraph_node *> worklist;
2734 worklist.create (64);
2736 /* See if a 0-count function has non-0 count callers. In this case we
2737 lost some profile. Drop its function profile to PROFILE_GUESSED. */
2738 FOR_EACH_DEFINED_FUNCTION (node)
2740 struct cgraph_edge *e;
2741 gcov_type call_count = 0;
2742 gcov_type max_tp_first_run = 0;
2743 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
2745 if (node->count)
2746 continue;
2747 for (e = node->callers; e; e = e->next_caller)
2749 call_count += e->count;
2751 if (e->caller->tp_first_run > max_tp_first_run)
2752 max_tp_first_run = e->caller->tp_first_run;
2755 /* If the time profile is missing, assign the maximum that comes from
2756 the caller functions. */
2757 if (!node->tp_first_run && max_tp_first_run)
2758 node->tp_first_run = max_tp_first_run + 1;
2760 if (call_count
2761 && fn && fn->cfg
2762 && (call_count * unlikely_count_fraction >= profile_info->runs))
2764 drop_profile (node, call_count);
2765 worklist.safe_push (node);
2769 /* Propagate the profile dropping to other 0-count COMDATs that are
2770 potentially called by COMDATs we already dropped the profile on. */
2771 while (worklist.length () > 0)
2773 struct cgraph_edge *e;
2775 node = worklist.pop ();
2776 for (e = node->callees; e; e = e->next_callee)
2778 struct cgraph_node *callee = e->callee;
2779 struct function *fn = DECL_STRUCT_FUNCTION (callee->decl);
2781 if (callee->count > 0)
2782 continue;
2783 if (DECL_COMDAT (callee->decl) && fn && fn->cfg
2784 && profile_status_for_fn (fn) == PROFILE_READ)
2786 drop_profile (node, 0);
2787 worklist.safe_push (callee);
2791 worklist.release ();
2794 /* Convert counts measured by profile driven feedback to frequencies.
2795 Return nonzero iff there was any nonzero execution count. */
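/* For example (illustrative): a block whose count is half of the hottest
   block's count is assigned a frequency of about BB_FREQ_MAX / 2.  */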
2798 counts_to_freqs (void)
2800 gcov_type count_max, true_count_max = 0;
2801 basic_block bb;
2803 /* Don't overwrite the estimated frequencies when the profile for
2804 the function is missing. We may drop this function to PROFILE_GUESSED
2805 later in drop_profile (). */
2806 if (!flag_auto_profile && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count)
2807 return 0;
2809 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2810 true_count_max = MAX (bb->count, true_count_max);
2812 count_max = MAX (true_count_max, 1);
2813 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2814 bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
2816 return true_count_max;
2819 /* Return true if the function is likely to be expensive, so there is no point
2820 in optimizing the prologue, epilogue or doing inlining at the expense of
2821 code size growth. THRESHOLD is the limit on the number of instructions the
2822 function can execute on average to still be considered not expensive. */
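/* For example (illustrative): with THRESHOLD == 100, a function is
   considered expensive once the sum of frequencies of its active insns
   exceeds 100 times the entry block frequency, i.e. it executes more
   than about 100 instructions per invocation on average.  */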
2824 bool
2825 expensive_function_p (int threshold)
2827 unsigned int sum = 0;
2828 basic_block bb;
2829 unsigned int limit;
2831 /* We cannot compute accurately for large thresholds due to scaled
2832 frequencies. */
2833 gcc_assert (threshold <= BB_FREQ_MAX);
2835 /* Frequencies are out of range. This either means that the function contains
2836 an internal loop executing more than BB_FREQ_MAX times or profile feedback
2837 is available and the function has not been executed at all. */
2838 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency == 0)
2839 return true;
2841 /* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
2842 limit = ENTRY_BLOCK_PTR_FOR_FN (cfun)->frequency * threshold;
2843 FOR_EACH_BB_FN (bb, cfun)
2845 rtx_insn *insn;
2847 FOR_BB_INSNS (bb, insn)
2848 if (active_insn_p (insn))
2850 sum += bb->frequency;
2851 if (sum > limit)
2852 return true;
2856 return false;
2859 /* Estimate and propagate basic block frequencies using the given branch
2860 probabilities. If FORCE is true, the frequencies are used to estimate
2861 the counts even when there are already non-zero profile counts. */
2863 void
2864 estimate_bb_frequencies (bool force)
2866 basic_block bb;
2867 sreal freq_max;
2869 if (force || profile_status_for_fn (cfun) != PROFILE_READ || !counts_to_freqs ())
2871 static int real_values_initialized = 0;
2873 if (!real_values_initialized)
2875 real_values_initialized = 1;
2876 real_zero = sreal (0, 0);
2877 real_one = sreal (1, 0);
2878 real_br_prob_base = sreal (REG_BR_PROB_BASE, 0);
2879 real_bb_freq_max = sreal (BB_FREQ_MAX, 0);
2880 real_one_half = sreal (1, -1);
2881 real_inv_br_prob_base = real_one / real_br_prob_base;
2882 real_almost_one = real_one - real_inv_br_prob_base;
2885 mark_dfs_back_edges ();
2887 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability =
2888 REG_BR_PROB_BASE;
2890 /* Set up block info for each basic block. */
2891 alloc_aux_for_blocks (sizeof (block_info));
2892 alloc_aux_for_edges (sizeof (edge_prob_info));
2893 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2895 edge e;
2896 edge_iterator ei;
2898 FOR_EACH_EDGE (e, ei, bb->succs)
2900 EDGE_INFO (e)->back_edge_prob = sreal (e->probability, 0);
2901 EDGE_INFO (e)->back_edge_prob *= real_inv_br_prob_base;
2905 /* First compute frequencies locally for each loop from innermost
2906 to outermost to examine frequencies for back edges. */
2907 estimate_loops ();
2909 freq_max = real_zero;
2910 FOR_EACH_BB_FN (bb, cfun)
2911 if (freq_max < BLOCK_INFO (bb)->frequency)
2912 freq_max = BLOCK_INFO (bb)->frequency;
2914 freq_max = real_bb_freq_max / freq_max;
2915 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
2917 sreal tmp = BLOCK_INFO (bb)->frequency * freq_max + real_one_half;
2918 bb->frequency = tmp.to_int ();
2921 free_aux_for_blocks ();
2922 free_aux_for_edges ();
2924 compute_function_frequency ();
2927 /* Decide whether function is hot, cold or unlikely executed. */
2928 void
2929 compute_function_frequency (void)
2931 basic_block bb;
2932 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2934 if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
2935 || MAIN_NAME_P (DECL_NAME (current_function_decl)))
2936 node->only_called_at_startup = true;
2937 if (DECL_STATIC_DESTRUCTOR (current_function_decl))
2938 node->only_called_at_exit = true;
2940 if (profile_status_for_fn (cfun) != PROFILE_READ)
2942 int flags = flags_from_decl_or_type (current_function_decl);
2943 if (lookup_attribute ("cold", DECL_ATTRIBUTES (current_function_decl))
2944 != NULL)
2945 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
2946 else if (lookup_attribute ("hot", DECL_ATTRIBUTES (current_function_decl))
2947 != NULL)
2948 node->frequency = NODE_FREQUENCY_HOT;
2949 else if (flags & ECF_NORETURN)
2950 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
2951 else if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
2952 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
2953 else if (DECL_STATIC_CONSTRUCTOR (current_function_decl)
2954 || DECL_STATIC_DESTRUCTOR (current_function_decl))
2955 node->frequency = NODE_FREQUENCY_EXECUTED_ONCE;
2956 return;
2959 /* Only the first time do we try to drop the function into the unlikely
2960 executed category; after inlining, roundoff errors may confuse us.
2961 The ipa-profile pass will drop functions only called from unlikely
2962 functions to unlikely, and that is most of what we care about. */
2963 if (!cfun->after_inlining)
2964 node->frequency = NODE_FREQUENCY_UNLIKELY_EXECUTED;
2965 FOR_EACH_BB_FN (bb, cfun)
2967 if (maybe_hot_bb_p (cfun, bb))
2969 node->frequency = NODE_FREQUENCY_HOT;
2970 return;
2972 if (!probably_never_executed_bb_p (cfun, bb))
2973 node->frequency = NODE_FREQUENCY_NORMAL;
2977 /* Build PREDICT_EXPR. */
2978 tree
2979 build_predict_expr (enum br_predictor predictor, enum prediction taken)
2981 tree t = build1 (PREDICT_EXPR, void_type_node,
2982 build_int_cst (integer_type_node, predictor));
2983 SET_PREDICT_EXPR_OUTCOME (t, taken);
2984 return t;
2987 const char *
2988 predictor_name (enum br_predictor predictor)
2990 return predictor_info[predictor].name;
2993 /* Predict branch probabilities and estimate profile of the tree CFG. */
2995 namespace {
2997 const pass_data pass_data_profile =
2999 GIMPLE_PASS, /* type */
3000 "profile_estimate", /* name */
3001 OPTGROUP_NONE, /* optinfo_flags */
3002 TV_BRANCH_PROB, /* tv_id */
3003 PROP_cfg, /* properties_required */
3004 0, /* properties_provided */
3005 0, /* properties_destroyed */
3006 0, /* todo_flags_start */
3007 0, /* todo_flags_finish */
3010 class pass_profile : public gimple_opt_pass
3012 public:
3013 pass_profile (gcc::context *ctxt)
3014 : gimple_opt_pass (pass_data_profile, ctxt)
3017 /* opt_pass methods: */
3018 virtual bool gate (function *) { return flag_guess_branch_prob; }
3019 virtual unsigned int execute (function *);
3021 }; // class pass_profile
3023 unsigned int
3024 pass_profile::execute (function *fun)
3026 unsigned nb_loops;
3028 loop_optimizer_init (LOOPS_NORMAL);
3029 if (dump_file && (dump_flags & TDF_DETAILS))
3030 flow_loops_dump (dump_file, NULL, 0);
3032 mark_irreducible_loops ();
3034 nb_loops = number_of_loops (fun);
3035 if (nb_loops > 1)
3036 scev_initialize ();
3038 tree_estimate_probability ();
3040 if (nb_loops > 1)
3041 scev_finalize ();
3043 loop_optimizer_finalize ();
3044 if (dump_file && (dump_flags & TDF_DETAILS))
3045 gimple_dump_cfg (dump_file, dump_flags);
3046 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
3047 profile_status_for_fn (fun) = PROFILE_GUESSED;
3048 return 0;
3051 } // anon namespace
3053 gimple_opt_pass *
3054 make_pass_profile (gcc::context *ctxt)
3056 return new pass_profile (ctxt);
3059 namespace {
3061 const pass_data pass_data_strip_predict_hints =
3063 GIMPLE_PASS, /* type */
3064 "*strip_predict_hints", /* name */
3065 OPTGROUP_NONE, /* optinfo_flags */
3066 TV_BRANCH_PROB, /* tv_id */
3067 PROP_cfg, /* properties_required */
3068 0, /* properties_provided */
3069 0, /* properties_destroyed */
3070 0, /* todo_flags_start */
3071 0, /* todo_flags_finish */
3074 class pass_strip_predict_hints : public gimple_opt_pass
3076 public:
3077 pass_strip_predict_hints (gcc::context *ctxt)
3078 : gimple_opt_pass (pass_data_strip_predict_hints, ctxt)
3081 /* opt_pass methods: */
3082 opt_pass * clone () { return new pass_strip_predict_hints (m_ctxt); }
3083 virtual unsigned int execute (function *);
3085 }; // class pass_strip_predict_hints
3087 /* Get rid of all builtin_expect calls and GIMPLE_PREDICT statements
3088 we no longer need. */
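/* For illustration (not from the sources): a statement such as

	x_1 = __builtin_expect (cond_2, 1);

   is replaced by the plain copy

	x_1 = cond_2;

   while GIMPLE_PREDICT statements are simply removed.  */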
3089 unsigned int
3090 pass_strip_predict_hints::execute (function *fun)
3092 basic_block bb;
3093 gimple ass_stmt;
3094 tree var;
3096 FOR_EACH_BB_FN (bb, fun)
3098 gimple_stmt_iterator bi;
3099 for (bi = gsi_start_bb (bb); !gsi_end_p (bi);)
3101 gimple stmt = gsi_stmt (bi);
3103 if (gimple_code (stmt) == GIMPLE_PREDICT)
3105 gsi_remove (&bi, true);
3106 continue;
3108 else if (is_gimple_call (stmt))
3110 tree fndecl = gimple_call_fndecl (stmt);
3112 if ((fndecl
3113 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3114 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
3115 && gimple_call_num_args (stmt) == 2)
3116 || (gimple_call_internal_p (stmt)
3117 && gimple_call_internal_fn (stmt) == IFN_BUILTIN_EXPECT))
3119 var = gimple_call_lhs (stmt);
3120 if (var)
3122 ass_stmt
3123 = gimple_build_assign (var, gimple_call_arg (stmt, 0));
3124 gsi_replace (&bi, ass_stmt, true);
3126 else
3128 gsi_remove (&bi, true);
3129 continue;
3133 gsi_next (&bi);
3136 return 0;
3139 } // anon namespace
3141 gimple_opt_pass *
3142 make_pass_strip_predict_hints (gcc::context *ctxt)
3144 return new pass_strip_predict_hints (ctxt);
3147 /* Rebuild function frequencies. Passes are in general expected to
3148 maintain the profile by hand; however, in some cases this is not possible:
3149 for example, when inlining several functions with loops, frequencies might
3150 run out of scale and thus need to be recomputed. */
3152 void
3153 rebuild_frequencies (void)
3155 timevar_push (TV_REBUILD_FREQUENCIES);
3157 /* When the max bb count in the function is small, there is a higher
3158 chance that there were truncation errors in the integer scaling
3159 of counts by inlining and other optimizations. This could lead
3160 to incorrect classification of code as being cold when it isn't.
3161 In that case, force the estimation of bb counts/frequencies from the
3162 branch probabilities, rather than computing frequencies from counts,
3163 which may also lead to frequencies incorrectly reduced to 0. There
3164 is less precision in the probabilities, so we only do this for small
3165 max counts. */
3166 gcov_type count_max = 0;
3167 basic_block bb;
3168 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
3169 count_max = MAX (bb->count, count_max);
3171 if (profile_status_for_fn (cfun) == PROFILE_GUESSED
3172 || (!flag_auto_profile && profile_status_for_fn (cfun) == PROFILE_READ
3173 && count_max < REG_BR_PROB_BASE/10))
3175 loop_optimizer_init (0);
3176 add_noreturn_fake_exit_edges ();
3177 mark_irreducible_loops ();
3178 connect_infinite_loops_to_exit ();
3179 estimate_bb_frequencies (true);
3180 remove_fake_exit_edges ();
3181 loop_optimizer_finalize ();
3183 else if (profile_status_for_fn (cfun) == PROFILE_READ)
3184 counts_to_freqs ();
3185 else
3186 gcc_unreachable ();
3187 timevar_pop (TV_REBUILD_FREQUENCIES);