gcc/bb-reorder.c
1 /* Basic block reordering routines for the GNU compiler.
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This (greedy) algorithm constructs traces in several rounds.
21 The construction starts from "seeds". The seed for the first round
22 is the entry point of the function. When there is more than one seed,
23 the one with the lowest key in the heap is selected first (see bb_to_key).
24 Then the algorithm repeatedly adds the most probable successor to the end
25 of a trace. Finally it connects the traces.
27 There are two parameters: Branch Threshold and Exec Threshold.
28 If the probability of an edge to a successor of the current basic block is
29 lower than Branch Threshold or its frequency is lower than Exec Threshold,
30 then the successor will be the seed in one of the next rounds.
31 Each round has these parameters lower than the previous one.
32 The last round has to have these parameters set to zero so that the
33 remaining blocks are picked up.
35 The algorithm selects the most probable successor from all unvisited
36 successors and successors that have been added to this trace.
37 The other successors (that have not been "sent" to the next round) will be
38 other seeds for this round and the secondary traces will start from them.
39 If the successor has not been visited in this trace, it is added to the
40 trace (however, there is some heuristic for simple branches).
41 If the successor has been visited in this trace, a loop has been found.
42 If the loop has many iterations, the loop is rotated so that the source
43 block of the most probable edge going out of the loop is the last block
44 of the trace.
45 If the loop has few iterations and there is no edge from the last block of
46 the loop going out of the loop, the loop header is duplicated.
48 When connecting traces, the algorithm first checks whether there is an edge
49 from the last block of a trace to the first block of another trace.
50 When there are still some unconnected traces it checks whether there exists
51 a basic block BB such that BB is a successor of the last block of a trace
52 and BB is a predecessor of the first block of another trace. In this case,
53 BB is duplicated, added at the end of the first trace and the traces are
54 connected through it.
55 The remaining traces are simply connected in order, so there will be a
56 jump to the beginning of each such trace.
58 The above description is for the full algorithm, which is used when the
59 function is optimized for speed. When the function is optimized for size,
60 in order to reduce long jumps and connect more fallthru edges, the
61 algorithm is modified as follows:
62 (1) Break long traces into short ones. A trace is broken at a block that
63 has multiple predecessors/successors during trace discovery. When
64 connecting traces, only connect Trace n with Trace n + 1. This change
65 removes most long jumps compared with the above algorithm.
66 (2) Ignore the edge probability and frequency for fallthru edges.
67 (3) Keep the original order of blocks when there is no chance to fall
68 through. We rely on the results of cfg_cleanup.
70 To implement the change for code size optimization, the block's index is
71 selected as the key and all traces are found in one round.
73 References:
75 "Software Trace Cache"
76 A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
77 http://citeseer.nj.nec.com/15361.html */
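As a minimal, self-contained sketch of the multi-round seeding described above (all names and numbers here are invented for illustration; none of them are GCC internals): blocks that fail a round's thresholds simply wait for a later, more permissive round, and the zero thresholds of the last round pick up everything that remains.

#include <stdio.h>

#define MODEL_ROUNDS 4

/* Per-round thresholds (per mille), decreasing; the last round is zero
   so that every remaining block is picked up.  Illustration only.  */
static const int model_branch_th[MODEL_ROUNDS] = {400, 200, 100, 0};
static const int model_exec_th[MODEL_ROUNDS] = {500, 200, 50, 0};

struct model_block { int prob, freq, round_placed; };

int
main (void)
{
  struct model_block blocks[] = {{900, 800, -1}, {300, 400, -1}, {50, 10, -1}};
  int n = sizeof blocks / sizeof blocks[0];
  int round, i;

  for (round = 0; round < MODEL_ROUNDS; round++)
    for (i = 0; i < n; i++)
      if (blocks[i].round_placed < 0
          && blocks[i].prob >= model_branch_th[round]
          && blocks[i].freq >= model_exec_th[round])
        blocks[i].round_placed = round;  /* hot enough for this round */

  for (i = 0; i < n; i++)
    printf ("block %d placed in round %d\n", i, blocks[i].round_placed);
  return 0;
}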
81 #include "config.h"
82 #include "system.h"
83 #include "coretypes.h"
84 #include "tm.h"
85 #include "input.h"
86 #include "alias.h"
87 #include "symtab.h"
88 #include "tree.h"
89 #include "rtl.h"
90 #include "regs.h"
91 #include "flags.h"
92 #include "output.h"
93 #include "target.h"
94 #include "hard-reg-set.h"
95 #include "function.h"
96 #include "tm_p.h"
97 #include "obstack.h"
98 #include "insn-config.h"
99 #include "expmed.h"
100 #include "dojump.h"
101 #include "explow.h"
102 #include "calls.h"
103 #include "emit-rtl.h"
104 #include "varasm.h"
105 #include "stmt.h"
106 #include "expr.h"
107 #include "optabs.h"
108 #include "params.h"
109 #include "diagnostic-core.h"
110 #include "toplev.h" /* user_defined_section_attribute */
111 #include "tree-pass.h"
112 #include "dominance.h"
113 #include "cfg.h"
114 #include "cfgrtl.h"
115 #include "cfganal.h"
116 #include "cfgbuild.h"
117 #include "cfgcleanup.h"
118 #include "predict.h"
119 #include "basic-block.h"
120 #include "df.h"
121 #include "bb-reorder.h"
122 #include "is-a.h"
123 #include "plugin-api.h"
124 #include "ipa-ref.h"
125 #include "cgraph.h"
126 #include "except.h"
127 #include "fibonacci_heap.h"
129 /* The number of rounds. In most cases there will only be 4 rounds, but
130 when partitioning hot and cold basic blocks into separate sections of
131 the object file there will be an extra round. */
132 #define N_ROUNDS 5
134 struct target_bb_reorder default_target_bb_reorder;
135 #if SWITCHABLE_TARGET
136 struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
137 #endif
139 #define uncond_jump_length \
140 (this_target_bb_reorder->x_uncond_jump_length)
142 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
143 static const int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
145 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
146 static const int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
148 /* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of the
149 entry block, the edge destination is not duplicated while connecting traces. */
150 #define DUPLICATION_THRESHOLD 100
152 typedef fibonacci_heap <long, basic_block_def> bb_heap_t;
153 typedef fibonacci_node <long, basic_block_def> bb_heap_node_t;
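The heap operations this file relies on (insert, extract_min, replace_key, delete_node, empty) come from GCC's fibonacci_heap template instantiated above. As a stand-in illustration of the ordering behavior only, assuming nothing about the real template beyond the calls visible in this file, a std::multimap gives the same lowest-key-first extraction:

#include <cstdio>
#include <map>

int
main ()
{
  /* Stand-in for the fibonacci heap's ordering; illustration only.  */
  std::multimap<long, int> heap;            /* key -> block index */
  heap.insert (std::make_pair (-900L, 0));  /* hotter block, lower key */
  heap.insert (std::make_pair (-100L, 1));
  while (!heap.empty ())
    {
      std::multimap<long, int>::iterator it = heap.begin ();  /* extract_min analogue */
      std::printf ("extracted block %d (key %ld)\n", it->second, it->first);
      heap.erase (it);
    }
  return 0;
}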
155 /* Structure to hold needed information for each basic block. */
156 typedef struct bbro_basic_block_data_def
158 /* Which trace is the bb start of (-1 means it is not a start of any). */
159 int start_of_trace;
161 /* Which trace is the bb end of (-1 means it is not an end of any). */
162 int end_of_trace;
164 /* Which trace is the bb in? */
165 int in_trace;
167 /* Which trace was this bb visited in? */
168 int visited;
170 /* Which heap is BB in (if any)? */
171 bb_heap_t *heap;
173 /* Which heap node is BB in (if any)? */
174 bb_heap_node_t *node;
175 } bbro_basic_block_data;
177 /* The current size of the following dynamic array. */
178 static int array_size;
180 /* The array which holds needed information for basic blocks. */
181 static bbro_basic_block_data *bbd;
183 /* To avoid frequent reallocation the size of the arrays is greater than
184 needed; the number of elements is (not less than) 1.25 * size_wanted. */
185 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
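A quick worked check of GET_ARRAY_SIZE: with integer division, (((X / 4) + 1) * 5) is always at least 1.25 * X, matching the comment above. A self-contained sketch:

#include <assert.h>

#define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)

int
main (void)
{
  int x;
  assert (GET_ARRAY_SIZE (10) == 15);    /* 1.5 * 10 */
  assert (GET_ARRAY_SIZE (100) == 130);  /* 1.3 * 100 */
  for (x = 0; x < 10000; x++)
    assert (4 * GET_ARRAY_SIZE (x) >= 5 * x);  /* i.e. >= 1.25 * x */
  return 0;
}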
187 /* Free the memory and set the pointer to NULL. */
188 #define FREE(P) (gcc_assert (P), free (P), P = 0)
190 /* Structure for holding information about a trace. */
191 struct trace
193 /* First and last basic block of the trace. */
194 basic_block first, last;
196 /* The round of STC creation in which this trace was found. */
197 int round;
199 /* The length (i.e. the number of basic blocks) of the trace. */
200 int length;
203 /* Maximum frequency and count of one of the entry blocks. */
204 static int max_entry_frequency;
205 static gcov_type max_entry_count;
207 /* Local function prototypes. */
208 static void find_traces (int *, struct trace *);
209 static basic_block rotate_loop (edge, struct trace *, int);
210 static void mark_bb_visited (basic_block, int);
211 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
212 int, bb_heap_t **, int);
213 static basic_block copy_bb (basic_block, edge, basic_block, int);
214 static long bb_to_key (basic_block);
215 static bool better_edge_p (const_basic_block, const_edge, int, int, int, int,
216 const_edge);
217 static bool connect_better_edge_p (const_edge, bool, int, const_edge,
218 struct trace *);
219 static void connect_traces (int, struct trace *);
220 static bool copy_bb_p (const_basic_block, int);
221 static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
223 /* Return the trace number in which BB was visited. */
225 static int
226 bb_visited_trace (const_basic_block bb)
228 gcc_assert (bb->index < array_size);
229 return bbd[bb->index].visited;
232 /* Mark BB as visited in trace number TRACE. */
234 static void
235 mark_bb_visited (basic_block bb, int trace)
237 bbd[bb->index].visited = trace;
238 if (bbd[bb->index].heap)
240 bbd[bb->index].heap->delete_node (bbd[bb->index].node);
241 bbd[bb->index].heap = NULL;
242 bbd[bb->index].node = NULL;
246 /* Check to see if bb should be pushed into the next round of trace
247 collections or not. Reasons for pushing the block forward are: 1) the
248 block is cold, we are doing partitioning, and there will be another
249 round (cold partition blocks are not supposed to be collected into
250 traces until the very last round); or 2) there will be another round,
251 and the basic block is not "hot enough" for the current round of
252 trace collection. */
254 static bool
255 push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds,
256 int exec_th, gcov_type count_th)
258 bool there_exists_another_round;
259 bool block_not_hot_enough;
261 there_exists_another_round = round < number_of_rounds - 1;
263 block_not_hot_enough = (bb->frequency < exec_th
264 || bb->count < count_th
265 || probably_never_executed_bb_p (cfun, bb));
267 if (there_exists_another_round
268 && block_not_hot_enough)
269 return true;
270 else
271 return false;
274 /* Find the traces for Software Trace Cache. Chain each trace through
275 RBI()->next. Store the number of traces to N_TRACES and description of
276 traces to TRACES. */
278 static void
279 find_traces (int *n_traces, struct trace *traces)
281 int i;
282 int number_of_rounds;
283 edge e;
284 edge_iterator ei;
285 bb_heap_t *heap = new bb_heap_t (LONG_MIN);
287 /* Add one extra round of trace collection when partitioning hot/cold
288 basic blocks into separate sections. The last round is for all the
289 cold blocks (and ONLY the cold blocks). */
291 number_of_rounds = N_ROUNDS - 1;
293 /* Insert entry points of function into heap. */
294 max_entry_frequency = 0;
295 max_entry_count = 0;
296 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
298 bbd[e->dest->index].heap = heap;
299 bbd[e->dest->index].node = heap->insert (bb_to_key (e->dest), e->dest);
300 if (e->dest->frequency > max_entry_frequency)
301 max_entry_frequency = e->dest->frequency;
302 if (e->dest->count > max_entry_count)
303 max_entry_count = e->dest->count;
306 /* Find the traces. */
307 for (i = 0; i < number_of_rounds; i++)
309 gcov_type count_threshold;
311 if (dump_file)
312 fprintf (dump_file, "STC - round %d\n", i + 1);
314 if (max_entry_count < INT_MAX / 1000)
315 count_threshold = max_entry_count * exec_threshold[i] / 1000;
316 else
317 count_threshold = max_entry_count / 1000 * exec_threshold[i];
319 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
320 max_entry_frequency * exec_threshold[i] / 1000,
321 count_threshold, traces, n_traces, i, &heap,
322 number_of_rounds);
324 delete heap;
326 if (dump_file)
328 for (i = 0; i < *n_traces; i++)
330 basic_block bb;
331 fprintf (dump_file, "Trace %d (round %d): ", i + 1,
332 traces[i].round + 1);
333 for (bb = traces[i].first;
334 bb != traces[i].last;
335 bb = (basic_block) bb->aux)
336 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
337 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
339 fflush (dump_file);
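The guarded count_threshold computation above avoids overflow when scaling a profile count by a per-mille factor. A self-contained sketch of the same idiom (gcov_type modeled here as long long; the numbers are invented):

#include <limits.h>
#include <stdio.h>

typedef long long model_gcov_type;

static model_gcov_type
scale_per_mille (model_gcov_type count, int per_mille)
{
  if (count < INT_MAX / 1000)
    return count * per_mille / 1000;  /* small count: multiply first, exact */
  return count / 1000 * per_mille;    /* huge count: divide first, no overflow */
}

int
main (void)
{
  printf ("%lld\n", scale_per_mille (2000, 500));          /* 1000 */
  printf ("%lld\n", scale_per_mille (3000000000LL, 500));  /* 1500000000 */
  return 0;
}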
343 /* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE
344 (with sequential number TRACE_N). */
346 static basic_block
347 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
349 basic_block bb;
351 /* Information about the best end (end after rotation) of the loop. */
352 basic_block best_bb = NULL;
353 edge best_edge = NULL;
354 int best_freq = -1;
355 gcov_type best_count = -1;
356 /* The best edge is preferred when its destination is not visited yet
357 or is a start block of some trace. */
358 bool is_preferred = false;
360 /* Find the most frequent edge that goes out from current trace. */
361 bb = back_edge->dest;
364 edge e;
365 edge_iterator ei;
367 FOR_EACH_EDGE (e, ei, bb->succs)
368 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
369 && bb_visited_trace (e->dest) != trace_n
370 && (e->flags & EDGE_CAN_FALLTHRU)
371 && !(e->flags & EDGE_COMPLEX))
373 if (is_preferred)
375 /* The best edge is preferred. */
376 if (!bb_visited_trace (e->dest)
377 || bbd[e->dest->index].start_of_trace >= 0)
379 /* The current edge E is also preferred. */
380 int freq = EDGE_FREQUENCY (e);
381 if (freq > best_freq || e->count > best_count)
383 best_freq = freq;
384 best_count = e->count;
385 best_edge = e;
386 best_bb = bb;
390 else
392 if (!bb_visited_trace (e->dest)
393 || bbd[e->dest->index].start_of_trace >= 0)
395 /* The current edge E is preferred. */
396 is_preferred = true;
397 best_freq = EDGE_FREQUENCY (e);
398 best_count = e->count;
399 best_edge = e;
400 best_bb = bb;
402 else
404 int freq = EDGE_FREQUENCY (e);
405 if (!best_edge || freq > best_freq || e->count > best_count)
407 best_freq = freq;
408 best_count = e->count;
409 best_edge = e;
410 best_bb = bb;
415 bb = (basic_block) bb->aux;
417 while (bb != back_edge->dest);
419 if (best_bb)
421 /* Rotate the loop so that the BEST_EDGE goes out from the last block of
422 the trace. */
423 if (back_edge->dest == trace->first)
425 trace->first = (basic_block) best_bb->aux;
427 else
429 basic_block prev_bb;
431 for (prev_bb = trace->first;
432 prev_bb->aux != back_edge->dest;
433 prev_bb = (basic_block) prev_bb->aux)
435 prev_bb->aux = best_bb->aux;
437 /* Try to get rid of uncond jump to cond jump. */
438 if (single_succ_p (prev_bb))
440 basic_block header = single_succ (prev_bb);
442 /* Duplicate HEADER if it is a small block containing cond jump
443 in the end. */
444 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
445 && !CROSSING_JUMP_P (BB_END (header)))
446 copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
450 else
452 /* We have not found a suitable loop tail, so do no rotation. */
453 best_bb = back_edge->src;
455 best_bb->aux = NULL;
456 return best_bb;
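rotate_loop relinks the trace through the bb->aux chain, as in the code above. A self-contained model of that aux-chain surgery on a three-block cycle (plain structs standing in for basic blocks; illustration only):

#include <stdio.h>

struct model_bb { int index; struct model_bb *aux; };

int
main (void)
{
  struct model_bb a = {0, 0}, b = {1, 0}, c = {2, 0};
  struct model_bb *best_bb, *first, *p;

  a.aux = &b; b.aux = &c; c.aux = &a;  /* the loop a -> b -> c -> a */

  best_bb = &b;          /* the trace should now end at b */
  first = best_bb->aux;  /* the new head is c */
  best_bb->aux = 0;      /* cut the cycle after the new tail */

  for (p = first; p; p = p->aux)
    printf ("%d ", p->index);  /* prints: 2 0 1 */
  printf ("\n");
  return 0;
}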
459 /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH, i.e. do
460 not include into traces basic blocks whose probability is lower than
461 BRANCH_TH, whose frequency is lower than EXEC_TH, or whose count is lower
462 than COUNT_TH. Store the new traces into TRACES and modify the number of
463 traces *N_TRACES. Set the round (which the trace belongs to) to ROUND.
464 The function expects starting basic blocks to be in *HEAP and will delete
465 *HEAP and store starting points for the next round into new *HEAP. */
467 static void
468 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
469 struct trace *traces, int *n_traces, int round,
470 bb_heap_t **heap, int number_of_rounds)
472 /* Heap for discarded basic blocks which are possible starting points for
473 the next round. */
474 bb_heap_t *new_heap = new bb_heap_t (LONG_MIN);
475 bool for_size = optimize_function_for_size_p (cfun);
477 while (!(*heap)->empty ())
479 basic_block bb;
480 struct trace *trace;
481 edge best_edge, e;
482 long key;
483 edge_iterator ei;
485 bb = (*heap)->extract_min ();
486 bbd[bb->index].heap = NULL;
487 bbd[bb->index].node = NULL;
489 if (dump_file)
490 fprintf (dump_file, "Getting bb %d\n", bb->index);
492 /* If the BB's frequency is too low, send BB to the next round. When
493 partitioning hot/cold blocks into separate sections, make sure all
494 the cold blocks (and ONLY the cold blocks) go into the (extra) final
495 round. When optimizing for size, do not push to next round. */
497 if (!for_size
498 && push_to_next_round_p (bb, round, number_of_rounds, exec_th,
499 count_th))
501 int key = bb_to_key (bb);
502 bbd[bb->index].heap = new_heap;
503 bbd[bb->index].node = new_heap->insert (key, bb);
505 if (dump_file)
506 fprintf (dump_file,
507 " Possible start point of next round: %d (key: %d)\n",
508 bb->index, key);
509 continue;
512 trace = traces + *n_traces;
513 trace->first = bb;
514 trace->round = round;
515 trace->length = 0;
516 bbd[bb->index].in_trace = *n_traces;
517 (*n_traces)++;
521 int prob, freq;
522 bool ends_in_call;
524 /* The probability and frequency of the best edge. */
525 int best_prob = INT_MIN / 2;
526 int best_freq = INT_MIN / 2;
528 best_edge = NULL;
529 mark_bb_visited (bb, *n_traces);
530 trace->length++;
532 if (dump_file)
533 fprintf (dump_file, "Basic block %d was visited in trace %d\n",
534 bb->index, *n_traces - 1);
536 ends_in_call = block_ends_with_call_p (bb);
538 /* Select the successor that will be placed after BB. */
539 FOR_EACH_EDGE (e, ei, bb->succs)
541 gcc_assert (!(e->flags & EDGE_FAKE));
543 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
544 continue;
546 if (bb_visited_trace (e->dest)
547 && bb_visited_trace (e->dest) != *n_traces)
548 continue;
550 if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
551 continue;
553 prob = e->probability;
554 freq = e->dest->frequency;
556 /* The only sensible preference for a call instruction is the
557 fallthru edge. Don't bother selecting anything else. */
558 if (ends_in_call)
560 if (e->flags & EDGE_CAN_FALLTHRU)
562 best_edge = e;
563 best_prob = prob;
564 best_freq = freq;
566 continue;
569 /* Skip an edge that cannot be a fallthru edge, or whose successor is
570 improbable or infrequent (i.e. an unsuitable successor). When
571 optimizing for size, ignore the probability and frequency. */
572 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
573 || ((prob < branch_th || EDGE_FREQUENCY (e) < exec_th
574 || e->count < count_th) && (!for_size)))
575 continue;
577 /* If partitioning hot/cold basic blocks, don't consider edges
578 that cross section boundaries. */
580 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
581 best_edge))
583 best_edge = e;
584 best_prob = prob;
585 best_freq = freq;
589 /* If the best destination has multiple predecessors, and can be
590 duplicated cheaper than a jump, don't allow it to be added
591 to a trace. We'll duplicate it when connecting traces. */
592 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
593 && copy_bb_p (best_edge->dest, 0))
594 best_edge = NULL;
596 /* If the best destination has multiple successors or predecessors,
597 don't allow it to be added when optimizing for size. This makes
598 sure predecessors with a smaller index are handled before the best
599 destination. It breaks long traces and reduces long jumps.
601 Take if-then-else as an example.
602       A
603      / \
604     B   C
605      \ /
606       D
607 If we do not remove the best edge B->D/C->D, the final order might
608 be A B D ... C. C is at the end of the program. If D's successors
609 and D are complicated, long jumps may be needed for A->C and C->D.
610 A similar issue arises for the order A C D ... B.
612 After removing the best edge, the final result will be A B C D or
613 A C B D. It does not add a jump compared with the previous order, but
614 it reduces the possibility of long jumps. */
615 if (best_edge && for_size
616 && (EDGE_COUNT (best_edge->dest->succs) > 1
617 || EDGE_COUNT (best_edge->dest->preds) > 1))
618 best_edge = NULL;
620 /* Add all non-selected successors to the heaps. */
621 FOR_EACH_EDGE (e, ei, bb->succs)
623 if (e == best_edge
624 || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
625 || bb_visited_trace (e->dest))
626 continue;
628 key = bb_to_key (e->dest);
630 if (bbd[e->dest->index].heap)
632 /* E->DEST is already in some heap. */
633 if (key != bbd[e->dest->index].node->get_key ())
635 if (dump_file)
637 fprintf (dump_file,
638 "Changing key for bb %d from %ld to %ld.\n",
639 e->dest->index,
640 (long) bbd[e->dest->index].node->get_key (),
641 key);
643 bbd[e->dest->index].heap->replace_key
644 (bbd[e->dest->index].node, key);
647 else
649 bb_heap_t *which_heap = *heap;
651 prob = e->probability;
652 freq = EDGE_FREQUENCY (e);
654 if (!(e->flags & EDGE_CAN_FALLTHRU)
655 || (e->flags & EDGE_COMPLEX)
656 || prob < branch_th || freq < exec_th
657 || e->count < count_th)
659 /* When partitioning hot/cold basic blocks, make sure
660 the cold blocks (and only the cold blocks) all get
661 pushed to the last round of trace collection. When
662 optimizing for size, do not push to next round. */
664 if (!for_size && push_to_next_round_p (e->dest, round,
665 number_of_rounds,
666 exec_th, count_th))
667 which_heap = new_heap;
670 bbd[e->dest->index].heap = which_heap;
671 bbd[e->dest->index].node = which_heap->insert (key, e->dest);
673 if (dump_file)
675 fprintf (dump_file,
676 " Possible start of %s round: %d (key: %ld)\n",
677 (which_heap == new_heap) ? "next" : "this",
678 e->dest->index, (long) key);
684 if (best_edge) /* Suitable successor was found. */
686 if (bb_visited_trace (best_edge->dest) == *n_traces)
688 /* We do nothing with one-basic-block loops. */
689 if (best_edge->dest != bb)
691 if (EDGE_FREQUENCY (best_edge)
692 > 4 * best_edge->dest->frequency / 5)
694 /* The loop has at least 4 iterations. If the loop
695 header is not the first block of the function
696 we can rotate the loop. */
698 if (best_edge->dest
699 != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
701 if (dump_file)
703 fprintf (dump_file,
704 "Rotating loop %d - %d\n",
705 best_edge->dest->index, bb->index);
707 bb->aux = best_edge->dest;
708 bbd[best_edge->dest->index].in_trace =
709 (*n_traces) - 1;
710 bb = rotate_loop (best_edge, trace, *n_traces);
713 else
715 /* The loop has less than 4 iterations. */
717 if (single_succ_p (bb)
718 && copy_bb_p (best_edge->dest,
719 optimize_edge_for_speed_p
720 (best_edge)))
722 bb = copy_bb (best_edge->dest, best_edge, bb,
723 *n_traces);
724 trace->length++;
729 /* Terminate the trace. */
730 break;
732 else
734 /* Check for a situation
735        A
736       /|
737      / |
738     B  |
739      \ |
740       \|
741        C
742 where
743 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
744 >= EDGE_FREQUENCY (AC).
745 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
746 Best ordering is then A B C.
748 When optimizing for size, A B C is always the best order.
750 This situation is created for example by:
752 if (A) B;
753 C; */
757 FOR_EACH_EDGE (e, ei, bb->succs)
758 if (e != best_edge
759 && (e->flags & EDGE_CAN_FALLTHRU)
760 && !(e->flags & EDGE_COMPLEX)
761 && !bb_visited_trace (e->dest)
762 && single_pred_p (e->dest)
763 && !(e->flags & EDGE_CROSSING)
764 && single_succ_p (e->dest)
765 && (single_succ_edge (e->dest)->flags
766 & EDGE_CAN_FALLTHRU)
767 && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
768 && single_succ (e->dest) == best_edge->dest
769 && (2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)
770 || for_size))
772 best_edge = e;
773 if (dump_file)
774 fprintf (dump_file, "Selecting BB %d\n",
775 best_edge->dest->index);
776 break;
779 bb->aux = best_edge->dest;
780 bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
781 bb = best_edge->dest;
785 while (best_edge);
786 trace->last = bb;
787 bbd[trace->first->index].start_of_trace = *n_traces - 1;
788 bbd[trace->last->index].end_of_trace = *n_traces - 1;
790 /* The trace is terminated so we have to recount the keys in the heap
791 (a block can have a lower key because one of its predecessors is now
792 an end of the trace). */
793 FOR_EACH_EDGE (e, ei, bb->succs)
795 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
796 || bb_visited_trace (e->dest))
797 continue;
799 if (bbd[e->dest->index].heap)
801 key = bb_to_key (e->dest);
802 if (key != bbd[e->dest->index].node->get_key ())
804 if (dump_file)
806 fprintf (dump_file,
807 "Changing key for bb %d from %ld to %ld.\n",
808 e->dest->index,
809 (long) bbd[e->dest->index].node->get_key (), key);
811 bbd[e->dest->index].heap->replace_key
812 (bbd[e->dest->index].node, key);
818 delete (*heap);
820 /* "Return" the new heap. */
821 *heap = new_heap;
824 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
825 it to the trace after BB, mark OLD_BB as visited and update the pass's data
826 structures (TRACE is the number of the trace OLD_BB is duplicated into). */
828 static basic_block
829 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
831 basic_block new_bb;
833 new_bb = duplicate_block (old_bb, e, bb);
834 BB_COPY_PARTITION (new_bb, old_bb);
836 gcc_assert (e->dest == new_bb);
838 if (dump_file)
839 fprintf (dump_file,
840 "Duplicated bb %d (created bb %d)\n",
841 old_bb->index, new_bb->index);
843 if (new_bb->index >= array_size
844 || last_basic_block_for_fn (cfun) > array_size)
846 int i;
847 int new_size;
849 new_size = MAX (last_basic_block_for_fn (cfun), new_bb->index + 1);
850 new_size = GET_ARRAY_SIZE (new_size);
851 bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
852 for (i = array_size; i < new_size; i++)
854 bbd[i].start_of_trace = -1;
855 bbd[i].end_of_trace = -1;
856 bbd[i].in_trace = -1;
857 bbd[i].visited = 0;
858 bbd[i].heap = NULL;
859 bbd[i].node = NULL;
861 array_size = new_size;
863 if (dump_file)
865 fprintf (dump_file,
866 "Growing the dynamic array to %d elements.\n",
867 array_size);
871 gcc_assert (!bb_visited_trace (e->dest));
872 mark_bb_visited (new_bb, trace);
873 new_bb->aux = bb->aux;
874 bb->aux = new_bb;
876 bbd[new_bb->index].in_trace = trace;
878 return new_bb;
881 /* Compute and return the key (for the heap) of the basic block BB. */
883 static long
884 bb_to_key (basic_block bb)
886 edge e;
887 edge_iterator ei;
888 int priority = 0;
890 /* Use index as key to align with its original order. */
891 if (optimize_function_for_size_p (cfun))
892 return bb->index;
894 /* Do not start in probably never executed blocks. */
896 if (BB_PARTITION (bb) == BB_COLD_PARTITION
897 || probably_never_executed_bb_p (cfun, bb))
898 return BB_FREQ_MAX;
900 /* Prefer blocks whose predecessor is an end of some trace
901 or whose predecessor edge is EDGE_DFS_BACK. */
902 FOR_EACH_EDGE (e, ei, bb->preds)
904 if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
905 && bbd[e->src->index].end_of_trace >= 0)
906 || (e->flags & EDGE_DFS_BACK))
908 int edge_freq = EDGE_FREQUENCY (e);
910 if (edge_freq > priority)
911 priority = edge_freq;
915 if (priority)
916 /* A block with priority should have a significantly lower key. */
917 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
919 return -bb->frequency;
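By construction, bb_to_key produces three disjoint bands: cold or never-executed blocks get BB_FREQ_MAX (the worst key), ordinary blocks get -frequency, and preferred blocks get a key below -(100 * BB_FREQ_MAX), so a preferred block always sorts ahead of any ordinary one. A self-contained check, assuming BB_FREQ_MAX's value of 10000 from basic-block.h:

#include <assert.h>

int
main (void)
{
  const long bb_freq_max = 10000;  /* assumed value of BB_FREQ_MAX */
  long cold_key = bb_freq_max;
  long best_ordinary_key = -bb_freq_max;  /* hottest plain block */
  long weakest_preferred_key = -(100 * bb_freq_max + 100 * 1 + 0);

  /* Preferred blocks always sort ahead of ordinary ones, which always
     sort ahead of cold ones.  */
  assert (weakest_preferred_key < best_ordinary_key);
  assert (best_ordinary_key < cold_key);
  return 0;
}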
922 /* Return true when the edge E from basic block BB is better than the temporary
923 best edge (details are in function). The probability of edge E is PROB. The
924 frequency of the successor is FREQ. The current best probability is
925 BEST_PROB, the best frequency is BEST_FREQ.
926 The edge is considered to be equivalent when PROB does not differ much from
927 BEST_PROB; similarly for frequency. */
929 static bool
930 better_edge_p (const_basic_block bb, const_edge e, int prob, int freq,
931 int best_prob, int best_freq, const_edge cur_best_edge)
933 bool is_better_edge;
935 /* The BEST_* values do not have to be best, but can be a bit smaller than
936 maximum values. */
937 int diff_prob = best_prob / 10;
938 int diff_freq = best_freq / 10;
940 /* The smaller one is better to keep the original order. */
941 if (optimize_function_for_size_p (cfun))
942 return !cur_best_edge
943 || cur_best_edge->dest->index > e->dest->index;
945 if (prob > best_prob + diff_prob)
946 /* The edge has higher probability than the temporary best edge. */
947 is_better_edge = true;
948 else if (prob < best_prob - diff_prob)
949 /* The edge has lower probability than the temporary best edge. */
950 is_better_edge = false;
951 else if (freq < best_freq - diff_freq)
952 /* The edge and the temporary best edge have almost equivalent
953 probabilities. The higher frequency of a successor now means
954 that there is another edge going into that successor.
955 This successor has lower frequency so it is better. */
956 is_better_edge = true;
957 else if (freq > best_freq + diff_freq)
958 /* This successor has higher frequency so it is worse. */
959 is_better_edge = false;
960 else if (e->dest->prev_bb == bb)
961 /* The edges have equivalent probabilities and the successors
962 have equivalent frequencies. Select the previous successor. */
963 is_better_edge = true;
964 else
965 is_better_edge = false;
967 /* If we are doing hot/cold partitioning, make sure that we always favor
968 non-crossing edges over crossing edges. */
970 if (!is_better_edge
971 && flag_reorder_blocks_and_partition
972 && cur_best_edge
973 && (cur_best_edge->flags & EDGE_CROSSING)
974 && !(e->flags & EDGE_CROSSING))
975 is_better_edge = true;
977 return is_better_edge;
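A self-contained model of the equivalence band better_edge_p applies when not optimizing for size (names invented; only the probability/frequency comparisons are modeled, not the crossing-edge override):

#include <stdio.h>

/* Returns nonzero when the candidate edge should replace the best one.  */
static int
model_better_edge (int prob, int freq, int best_prob, int best_freq)
{
  int diff_prob = best_prob / 10;
  int diff_freq = best_freq / 10;

  if (prob > best_prob + diff_prob)
    return 1;
  if (prob < best_prob - diff_prob)
    return 0;
  /* Probabilities are "equivalent": prefer the colder successor, since a
     high-frequency successor has other edges flowing into it.  */
  return freq < best_freq - diff_freq;
}

int
main (void)
{
  /* 5300 vs 5000 falls inside the 10% band, so frequency decides.  */
  printf ("%d\n", model_better_edge (5300, 200, 5000, 1000));  /* 1 */
  printf ("%d\n", model_better_edge (5300, 990, 5000, 1000));  /* 0 */
  return 0;
}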
980 /* Return true when the edge E is better than the temporary best edge
981 CUR_BEST_EDGE. If SRC_INDEX_P is true, the function compares the src bb of
982 E and CUR_BEST_EDGE; otherwise it will compare the dest bb.
983 BEST_LEN is the trace length of src (or dest) bb in CUR_BEST_EDGE.
984 TRACES record the information about traces.
985 When optimizing for size, the edge with smaller index is better.
986 When optimizing for speed, the edge with bigger probability or longer trace
987 is better. */
989 static bool
990 connect_better_edge_p (const_edge e, bool src_index_p, int best_len,
991 const_edge cur_best_edge, struct trace *traces)
993 int e_index;
994 int b_index;
995 bool is_better_edge;
997 if (!cur_best_edge)
998 return true;
1000 if (optimize_function_for_size_p (cfun))
1002 e_index = src_index_p ? e->src->index : e->dest->index;
1003 b_index = src_index_p ? cur_best_edge->src->index
1004 : cur_best_edge->dest->index;
1005 /* The smaller one is better to keep the original order. */
1006 return b_index > e_index;
1009 if (src_index_p)
1011 e_index = e->src->index;
1013 if (e->probability > cur_best_edge->probability)
1014 /* The edge has higher probability than the temporary best edge. */
1015 is_better_edge = true;
1016 else if (e->probability < cur_best_edge->probability)
1017 /* The edge has lower probability than the temporary best edge. */
1018 is_better_edge = false;
1019 else if (traces[bbd[e_index].end_of_trace].length > best_len)
1020 /* The edge and the temporary best edge have equivalent probabilities.
1021 The edge with longer trace is better. */
1022 is_better_edge = true;
1023 else
1024 is_better_edge = false;
1026 else
1028 e_index = e->dest->index;
1030 if (e->probability > cur_best_edge->probability)
1031 /* The edge has higher probability than the temporary best edge. */
1032 is_better_edge = true;
1033 else if (e->probability < cur_best_edge->probability)
1034 /* The edge has lower probability than the temporary best edge. */
1035 is_better_edge = false;
1036 else if (traces[bbd[e_index].start_of_trace].length > best_len)
1037 /* The edge and the temporary best edge have equivalent probabilities.
1038 The edge with longer trace is better. */
1039 is_better_edge = true;
1040 else
1041 is_better_edge = false;
1044 return is_better_edge;
1047 /* Connect traces in array TRACES, N_TRACES is the count of traces. */
1049 static void
1050 connect_traces (int n_traces, struct trace *traces)
1052 int i;
1053 bool *connected;
1054 bool two_passes;
1055 int last_trace;
1056 int current_pass;
1057 int current_partition;
1058 int freq_threshold;
1059 gcov_type count_threshold;
1060 bool for_size = optimize_function_for_size_p (cfun);
1062 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
1063 if (max_entry_count < INT_MAX / 1000)
1064 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
1065 else
1066 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
1068 connected = XCNEWVEC (bool, n_traces);
1069 last_trace = -1;
1070 current_pass = 1;
1071 current_partition = BB_PARTITION (traces[0].first);
1072 two_passes = false;
1074 if (crtl->has_bb_partition)
1075 for (i = 0; i < n_traces && !two_passes; i++)
1076 if (BB_PARTITION (traces[0].first)
1077 != BB_PARTITION (traces[i].first))
1078 two_passes = true;
1080 for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++)
1082 int t = i;
1083 int t2;
1084 edge e, best;
1085 int best_len;
1087 if (i >= n_traces)
1089 gcc_assert (two_passes && current_pass == 1);
1090 i = 0;
1091 t = i;
1092 current_pass = 2;
1093 if (current_partition == BB_HOT_PARTITION)
1094 current_partition = BB_COLD_PARTITION;
1095 else
1096 current_partition = BB_HOT_PARTITION;
1099 if (connected[t])
1100 continue;
1102 if (two_passes
1103 && BB_PARTITION (traces[t].first) != current_partition)
1104 continue;
1106 connected[t] = true;
1108 /* Find the predecessor traces. */
1109 for (t2 = t; t2 > 0;)
1111 edge_iterator ei;
1112 best = NULL;
1113 best_len = 0;
1114 FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
1116 int si = e->src->index;
1118 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1119 && (e->flags & EDGE_CAN_FALLTHRU)
1120 && !(e->flags & EDGE_COMPLEX)
1121 && bbd[si].end_of_trace >= 0
1122 && !connected[bbd[si].end_of_trace]
1123 && (BB_PARTITION (e->src) == current_partition)
1124 && connect_better_edge_p (e, true, best_len, best, traces))
1126 best = e;
1127 best_len = traces[bbd[si].end_of_trace].length;
1130 if (best)
1132 best->src->aux = best->dest;
1133 t2 = bbd[best->src->index].end_of_trace;
1134 connected[t2] = true;
1136 if (dump_file)
1138 fprintf (dump_file, "Connection: %d %d\n",
1139 best->src->index, best->dest->index);
1142 else
1143 break;
1146 if (last_trace >= 0)
1147 traces[last_trace].last->aux = traces[t2].first;
1148 last_trace = t;
1150 /* Find the successor traces. */
1151 while (1)
1153 /* Find the continuation of the chain. */
1154 edge_iterator ei;
1155 best = NULL;
1156 best_len = 0;
1157 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1159 int di = e->dest->index;
1161 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1162 && (e->flags & EDGE_CAN_FALLTHRU)
1163 && !(e->flags & EDGE_COMPLEX)
1164 && bbd[di].start_of_trace >= 0
1165 && !connected[bbd[di].start_of_trace]
1166 && (BB_PARTITION (e->dest) == current_partition)
1167 && connect_better_edge_p (e, false, best_len, best, traces))
1169 best = e;
1170 best_len = traces[bbd[di].start_of_trace].length;
1174 if (for_size)
1176 if (!best)
1177 /* Stop finding the successor traces. */
1178 break;
1180 /* It is OK to connect block n with block n + 1 or a block
1181 before n. For others, only connect to the loop header. */
1182 if (best->dest->index > (traces[t].last->index + 1))
1184 int count = EDGE_COUNT (best->dest->preds);
1186 FOR_EACH_EDGE (e, ei, best->dest->preds)
1187 if (e->flags & EDGE_DFS_BACK)
1188 count--;
1190 /* If dest has multiple predecessors, skip it. We expect
1191 that one predecessor with smaller index connects with it
1192 later. */
1193 if (count != 1)
1194 break;
1197 /* Only connect Trace n with Trace n + 1. It is conservative
1198 to keep the order as close as possible to the original order.
1199 It also helps to reduce long jumps. */
1200 if (last_trace != bbd[best->dest->index].start_of_trace - 1)
1201 break;
1203 if (dump_file)
1204 fprintf (dump_file, "Connection: %d %d\n",
1205 best->src->index, best->dest->index);
1207 t = bbd[best->dest->index].start_of_trace;
1208 traces[last_trace].last->aux = traces[t].first;
1209 connected[t] = true;
1210 last_trace = t;
1212 else if (best)
1214 if (dump_file)
1216 fprintf (dump_file, "Connection: %d %d\n",
1217 best->src->index, best->dest->index);
1219 t = bbd[best->dest->index].start_of_trace;
1220 traces[last_trace].last->aux = traces[t].first;
1221 connected[t] = true;
1222 last_trace = t;
1224 else
1226 /* Try to connect the traces by duplication of 1 block. */
1227 edge e2;
1228 basic_block next_bb = NULL;
1229 bool try_copy = false;
1231 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1232 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1233 && (e->flags & EDGE_CAN_FALLTHRU)
1234 && !(e->flags & EDGE_COMPLEX)
1235 && (!best || e->probability > best->probability))
1237 edge_iterator ei;
1238 edge best2 = NULL;
1239 int best2_len = 0;
1241 /* If the destination is a start of a trace which is only
1242 one block long, then no need to search the successor
1243 blocks of the trace. Accept it. */
1244 if (bbd[e->dest->index].start_of_trace >= 0
1245 && traces[bbd[e->dest->index].start_of_trace].length
1246 == 1)
1248 best = e;
1249 try_copy = true;
1250 continue;
1253 FOR_EACH_EDGE (e2, ei, e->dest->succs)
1255 int di = e2->dest->index;
1257 if (e2->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
1258 || ((e2->flags & EDGE_CAN_FALLTHRU)
1259 && !(e2->flags & EDGE_COMPLEX)
1260 && bbd[di].start_of_trace >= 0
1261 && !connected[bbd[di].start_of_trace]
1262 && BB_PARTITION (e2->dest) == current_partition
1263 && EDGE_FREQUENCY (e2) >= freq_threshold
1264 && e2->count >= count_threshold
1265 && (!best2
1266 || e2->probability > best2->probability
1267 || (e2->probability == best2->probability
1268 && traces[bbd[di].start_of_trace].length
1269 > best2_len))))
1271 best = e;
1272 best2 = e2;
1273 if (e2->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1274 best2_len = traces[bbd[di].start_of_trace].length;
1275 else
1276 best2_len = INT_MAX;
1277 next_bb = e2->dest;
1278 try_copy = true;
1283 if (crtl->has_bb_partition)
1284 try_copy = false;
1286 /* Copy tiny blocks always; copy larger blocks only when the
1287 edge is traversed frequently enough. */
1288 if (try_copy
1289 && copy_bb_p (best->dest,
1290 optimize_edge_for_speed_p (best)
1291 && EDGE_FREQUENCY (best) >= freq_threshold
1292 && best->count >= count_threshold))
1294 basic_block new_bb;
1296 if (dump_file)
1298 fprintf (dump_file, "Connection: %d %d ",
1299 traces[t].last->index, best->dest->index);
1300 if (!next_bb)
1301 fputc ('\n', dump_file);
1302 else if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
1303 fprintf (dump_file, "exit\n");
1304 else
1305 fprintf (dump_file, "%d\n", next_bb->index);
1308 new_bb = copy_bb (best->dest, best, traces[t].last, t);
1309 traces[t].last = new_bb;
1310 if (next_bb && next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
1312 t = bbd[next_bb->index].start_of_trace;
1313 traces[last_trace].last->aux = traces[t].first;
1314 connected[t] = true;
1315 last_trace = t;
1317 else
1318 break; /* Stop finding the successor traces. */
1320 else
1321 break; /* Stop finding the successor traces. */
1326 if (dump_file)
1328 basic_block bb;
1330 fprintf (dump_file, "Final order:\n");
1331 for (bb = traces[0].first; bb; bb = (basic_block) bb->aux)
1332 fprintf (dump_file, "%d ", bb->index);
1333 fprintf (dump_file, "\n");
1334 fflush (dump_file);
1337 FREE (connected);
1340 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
1341 when code size is allowed to grow by duplication. */
1343 static bool
1344 copy_bb_p (const_basic_block bb, int code_may_grow)
1346 int size = 0;
1347 int max_size = uncond_jump_length;
1348 rtx_insn *insn;
1350 if (!bb->frequency)
1351 return false;
1352 if (EDGE_COUNT (bb->preds) < 2)
1353 return false;
1354 if (!can_duplicate_block_p (bb))
1355 return false;
1357 /* Avoid duplicating blocks which have many successors (PR/13430). */
1358 if (EDGE_COUNT (bb->succs) > 8)
1359 return false;
1361 if (code_may_grow && optimize_bb_for_speed_p (bb))
1362 max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
1364 FOR_BB_INSNS (bb, insn)
1366 if (INSN_P (insn))
1367 size += get_attr_min_length (insn);
1370 if (size <= max_size)
1371 return true;
1373 if (dump_file)
1375 fprintf (dump_file,
1376 "Block %d can't be copied because its size = %d.\n",
1377 bb->index, size);
1380 return false;
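A sketch of the size budget copy_bb_p enforces: a block is copied only when its estimated length fits within the length of the unconditional jump the copy removes, scaled by PARAM_MAX_GROW_COPY_BB_INSNS when growth is allowed. All numbers below are invented for illustration:

#include <stdio.h>

static int
model_worth_copying (int block_len, int uncond_jump_len, int code_may_grow,
                     int max_grow_insns)
{
  int max_size = uncond_jump_len;
  if (code_may_grow)
    max_size *= max_grow_insns;  /* stands in for PARAM_MAX_GROW_COPY_BB_INSNS */
  return block_len <= max_size;
}

int
main (void)
{
  printf ("%d\n", model_worth_copying (4, 4, 0, 8));   /* 1: no bigger than a jump */
  printf ("%d\n", model_worth_copying (24, 4, 1, 8));  /* 1: within the 8x budget */
  printf ("%d\n", model_worth_copying (40, 4, 0, 8));  /* 0: growth not allowed */
  return 0;
}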
1383 /* Return the length of an unconditional jump instruction. */
1385 int
1386 get_uncond_jump_length (void)
1388 int length;
1390 start_sequence ();
1391 rtx_code_label *label = emit_label (gen_label_rtx ());
1392 rtx_insn *jump = emit_jump_insn (gen_jump (label));
1393 length = get_attr_min_length (jump);
1394 end_sequence ();
1396 return length;
1399 /* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
1400 Duplicate the landing pad and split the edges so that no EH edge
1401 crosses partitions. */
1403 static void
1404 fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
1406 eh_landing_pad new_lp;
1407 basic_block new_bb, last_bb, post_bb;
1408 rtx_insn *jump;
1409 unsigned new_partition;
1410 edge_iterator ei;
1411 edge e;
1413 /* Generate the new landing-pad structure. */
1414 new_lp = gen_eh_landing_pad (old_lp->region);
1415 new_lp->post_landing_pad = old_lp->post_landing_pad;
1416 new_lp->landing_pad = gen_label_rtx ();
1417 LABEL_PRESERVE_P (new_lp->landing_pad) = 1;
1419 /* Put appropriate instructions in new bb. */
1420 rtx_code_label *new_label = emit_label (new_lp->landing_pad);
1422 expand_dw2_landing_pad_for_region (old_lp->region);
1424 post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
1425 post_bb = single_succ (post_bb);
1426 rtx_code_label *post_label = block_label (post_bb);
1427 jump = emit_jump_insn (gen_jump (post_label));
1428 JUMP_LABEL (jump) = post_label;
1430 /* Create new basic block to be dest for lp. */
1431 last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
1432 new_bb = create_basic_block (new_label, jump, last_bb);
1433 new_bb->aux = last_bb->aux;
1434 last_bb->aux = new_bb;
1436 emit_barrier_after_bb (new_bb);
1438 make_edge (new_bb, post_bb, 0);
1440 /* Make sure new bb is in the other partition. */
1441 new_partition = BB_PARTITION (old_bb);
1442 new_partition ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1443 BB_SET_PARTITION (new_bb, new_partition);
1445 /* Fix up the edges. */
1446 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)) != NULL; )
1447 if (BB_PARTITION (e->src) == new_partition)
1449 rtx_insn *insn = BB_END (e->src);
1450 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
1452 gcc_assert (note != NULL);
1453 gcc_checking_assert (INTVAL (XEXP (note, 0)) == old_lp->index);
1454 XEXP (note, 0) = GEN_INT (new_lp->index);
1456 /* Adjust the edge to the new destination. */
1457 redirect_edge_succ (e, new_bb);
1459 else
1460 ei_next (&ei);
1464 /* Ensure that all hot bbs are included in a hot path through the
1465 procedure. This is done by calling this function twice, once
1466 with WALK_UP true (to look for paths from the entry to hot bbs) and
1467 once with WALK_UP false (to look for paths from hot bbs to the exit).
1468 Returns the updated value of COLD_BB_COUNT and adds newly-hot bbs
1469 to BBS_IN_HOT_PARTITION. */
1471 static unsigned int
1472 sanitize_hot_paths (bool walk_up, unsigned int cold_bb_count,
1473 vec<basic_block> *bbs_in_hot_partition)
1475 /* Callers check this. */
1476 gcc_checking_assert (cold_bb_count);
1478 /* Keep examining hot bbs while we still have some left to check
1479 and there are remaining cold bbs. */
1480 vec<basic_block> hot_bbs_to_check = bbs_in_hot_partition->copy ();
1481 while (! hot_bbs_to_check.is_empty ()
1482 && cold_bb_count)
1484 basic_block bb = hot_bbs_to_check.pop ();
1485 vec<edge, va_gc> *edges = walk_up ? bb->preds : bb->succs;
1486 edge e;
1487 edge_iterator ei;
1488 int highest_probability = 0;
1489 int highest_freq = 0;
1490 gcov_type highest_count = 0;
1491 bool found = false;
1493 /* Walk the preds/succs and check if there is at least one already
1494 marked hot. Keep track of the most frequent pred/succ so that we
1495 can mark it hot if we don't find one. */
1496 FOR_EACH_EDGE (e, ei, edges)
1498 basic_block reach_bb = walk_up ? e->src : e->dest;
1500 if (e->flags & EDGE_DFS_BACK)
1501 continue;
1503 if (BB_PARTITION (reach_bb) != BB_COLD_PARTITION)
1505 found = true;
1506 break;
1508 /* The loop below looks for the hottest edge via the edge count; if
1509 the counts are all zero, it falls back to the edge frequency and
1510 finally to the edge probability. */
1511 if (e->count > highest_count)
1512 highest_count = e->count;
1513 int edge_freq = EDGE_FREQUENCY (e);
1514 if (edge_freq > highest_freq)
1515 highest_freq = edge_freq;
1516 if (e->probability > highest_probability)
1517 highest_probability = e->probability;
1520 /* If bb is reached by (or reaches, in the case of !WALK_UP) another hot
1521 block (or unpartitioned, e.g. the entry block) then it is ok. If not,
1522 then the most frequent pred (or succ) needs to be adjusted. In the
1523 case where multiple preds/succs have the same frequency (e.g. a
1524 50-50 branch), then both will be adjusted. */
1525 if (found)
1526 continue;
1528 FOR_EACH_EDGE (e, ei, edges)
1530 if (e->flags & EDGE_DFS_BACK)
1531 continue;
1532 /* Select the hottest edge using the edge count; if it is zero,
1533 fall back to the edge frequency and finally to the edge
1534 probability. */
1535 if (highest_count)
1537 if (e->count < highest_count)
1538 continue;
1540 else if (highest_freq)
1542 if (EDGE_FREQUENCY (e) < highest_freq)
1543 continue;
1545 else if (e->probability < highest_probability)
1546 continue;
1548 basic_block reach_bb = walk_up ? e->src : e->dest;
1550 /* We have a hot bb with an immediate dominator that is cold.
1551 The dominator needs to be re-marked hot. */
1552 BB_SET_PARTITION (reach_bb, BB_HOT_PARTITION);
1553 cold_bb_count--;
1555 /* Now we need to examine newly-hot reach_bb to see if it is also
1556 dominated by a cold bb. */
1557 bbs_in_hot_partition->safe_push (reach_bb);
1558 hot_bbs_to_check.safe_push (reach_bb);
1562 return cold_bb_count;
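A self-contained model of the hottest-edge fallback used above: count wins when any count is non-zero, otherwise frequency, otherwise probability, and ties (e.g. a 50-50 branch) promote every edge that matches the maximum. The edge data is invented:

#include <stdio.h>

struct model_edge { long long count; int freq; int prob; };

int
main (void)
{
  struct model_edge edges[] = {{0, 50, 3000}, {0, 80, 2500}, {0, 80, 1000}};
  int n = sizeof edges / sizeof edges[0];
  long long highest_count = 0;
  int highest_freq = 0, highest_prob = 0, i;

  /* First walk: record the strongest value of each metric.  */
  for (i = 0; i < n; i++)
    {
      if (edges[i].count > highest_count) highest_count = edges[i].count;
      if (edges[i].freq > highest_freq) highest_freq = edges[i].freq;
      if (edges[i].prob > highest_prob) highest_prob = edges[i].prob;
    }

  /* Second walk: promote every edge matching the strongest available
     metric; all counts are zero here, so frequency decides and the
     tie at 80 promotes edges 1 and 2.  */
  for (i = 0; i < n; i++)
    {
      if (highest_count)
        {
          if (edges[i].count < highest_count)
            continue;
        }
      else if (highest_freq)
        {
          if (edges[i].freq < highest_freq)
            continue;
        }
      else if (edges[i].prob < highest_prob)
        continue;
      printf ("edge %d marked hot\n", i);
    }
  return 0;
}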
1566 /* Find the basic blocks that are rarely executed and need to be moved to
1567 a separate section of the .o file (to cut down on paging and improve
1568 cache locality). Return a vector of all edges that cross. */
1570 static vec<edge>
1571 find_rarely_executed_basic_blocks_and_crossing_edges (void)
1573 vec<edge> crossing_edges = vNULL;
1574 basic_block bb;
1575 edge e;
1576 edge_iterator ei;
1577 unsigned int cold_bb_count = 0;
1578 auto_vec<basic_block> bbs_in_hot_partition;
1580 /* Mark which partition (hot/cold) each basic block belongs in. */
1581 FOR_EACH_BB_FN (bb, cfun)
1583 bool cold_bb = false;
1585 if (probably_never_executed_bb_p (cfun, bb))
1587 /* Handle profile insanities created by upstream optimizations
1588 by also checking the incoming edge weights. If there is a non-cold
1589 incoming edge, conservatively prevent this block from being split
1590 into the cold section. */
1591 cold_bb = true;
1592 FOR_EACH_EDGE (e, ei, bb->preds)
1593 if (!probably_never_executed_edge_p (cfun, e))
1595 cold_bb = false;
1596 break;
1599 if (cold_bb)
1601 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1602 cold_bb_count++;
1604 else
1606 BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1607 bbs_in_hot_partition.safe_push (bb);
1611 /* Ensure that hot bbs are included along a hot path from the entry to exit.
1612 Several different possibilities may include cold bbs along all paths
1613 to/from a hot bb. One is that there are edge weight insanities
1614 due to optimization phases that do not properly update basic block profile
1615 counts. The second is that the entry of the function may not be hot, because
1616 it is entered fewer times than the number of profile training runs, but there
1617 is a loop inside the function that causes blocks within the function to be
1618 above the threshold for hotness. This is fixed by walking up from hot bbs
1619 to the entry block, and then down from hot bbs to the exit, performing
1620 partitioning fixups as necessary. */
1621 if (cold_bb_count)
1623 mark_dfs_back_edges ();
1624 cold_bb_count = sanitize_hot_paths (true, cold_bb_count,
1625 &bbs_in_hot_partition);
1626 if (cold_bb_count)
1627 sanitize_hot_paths (false, cold_bb_count, &bbs_in_hot_partition);
1630 /* The format of .gcc_except_table does not allow landing pads to
1631 be in a different partition than the throw. Fix this by either
1632 moving or duplicating the landing pads. */
1633 if (cfun->eh->lp_array)
1635 unsigned i;
1636 eh_landing_pad lp;
1638 FOR_EACH_VEC_ELT (*cfun->eh->lp_array, i, lp)
1640 bool all_same, all_diff;
1642 if (lp == NULL
1643 || lp->landing_pad == NULL_RTX
1644 || !LABEL_P (lp->landing_pad))
1645 continue;
1647 all_same = all_diff = true;
1648 bb = BLOCK_FOR_INSN (lp->landing_pad);
1649 FOR_EACH_EDGE (e, ei, bb->preds)
1651 gcc_assert (e->flags & EDGE_EH);
1652 if (BB_PARTITION (bb) == BB_PARTITION (e->src))
1653 all_diff = false;
1654 else
1655 all_same = false;
1658 if (all_same)
1660 else if (all_diff)
1662 int which = BB_PARTITION (bb);
1663 which ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
1664 BB_SET_PARTITION (bb, which);
1666 else
1667 fix_up_crossing_landing_pad (lp, bb);
1671 /* Mark every edge that crosses between sections. */
1673 FOR_EACH_BB_FN (bb, cfun)
1674 FOR_EACH_EDGE (e, ei, bb->succs)
1676 unsigned int flags = e->flags;
1678 /* We should never have EDGE_CROSSING set yet. */
1679 gcc_checking_assert ((flags & EDGE_CROSSING) == 0);
1681 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1682 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1683 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1685 crossing_edges.safe_push (e);
1686 flags |= EDGE_CROSSING;
1689 /* Now that we've split eh edges as appropriate, allow landing pads
1690 to be merged with the post-landing pads. */
1691 flags &= ~EDGE_PRESERVE;
1693 e->flags = flags;
1696 return crossing_edges;
1699 /* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru. */
1701 static void
1702 set_edge_can_fallthru_flag (void)
1704 basic_block bb;
1706 FOR_EACH_BB_FN (bb, cfun)
1708 edge e;
1709 edge_iterator ei;
1711 FOR_EACH_EDGE (e, ei, bb->succs)
1713 e->flags &= ~EDGE_CAN_FALLTHRU;
1715 /* The FALLTHRU edge is also a CAN_FALLTHRU edge. */
1716 if (e->flags & EDGE_FALLTHRU)
1717 e->flags |= EDGE_CAN_FALLTHRU;
1720 /* If the BB ends with an invertible condjump, all (2) edges are
1721 CAN_FALLTHRU edges. */
1722 if (EDGE_COUNT (bb->succs) != 2)
1723 continue;
1724 if (!any_condjump_p (BB_END (bb)))
1725 continue;
1727 rtx_jump_insn *bb_end_jump = as_a <rtx_jump_insn *> (BB_END (bb));
1728 if (!invert_jump (bb_end_jump, JUMP_LABEL (bb_end_jump), 0))
1729 continue;
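/* The trial inversion above succeeded, so the jump is known to be
   invertible; undo it so the CFG is left unchanged and only the
   flags below record the fact.  */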
1730 invert_jump (bb_end_jump, JUMP_LABEL (bb_end_jump), 0);
1731 EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU;
1732 EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU;
1736 /* If any destination of a crossing edge does not have a label, add a label;
1737 convert any easy fall-through crossing edges to unconditional jumps. */
1739 static void
1740 add_labels_and_missing_jumps (vec<edge> crossing_edges)
1742 size_t i;
1743 edge e;
1745 FOR_EACH_VEC_ELT (crossing_edges, i, e)
1747 basic_block src = e->src;
1748 basic_block dest = e->dest;
1749 rtx_jump_insn *new_jump;
1751 if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1752 continue;
1754 /* Make sure dest has a label. */
1755 rtx_code_label *label = block_label (dest);
1757 /* Nothing to do for non-fallthru edges. */
1758 if (src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1759 continue;
1760 if ((e->flags & EDGE_FALLTHRU) == 0)
1761 continue;
1763 /* If the block does not end with a control flow insn, then we
1764 can trivially add a jump to the end to fixup the crossing.
1765 Otherwise the jump will have to go in a new bb, which will
1766 be handled by fix_up_fall_thru_edges function. */
1767 if (control_flow_insn_p (BB_END (src)))
1768 continue;
1770 /* Make sure there's only one successor. */
1771 gcc_assert (single_succ_p (src));
1773 new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src));
1774 BB_END (src) = new_jump;
1775 JUMP_LABEL (new_jump) = label;
1776 LABEL_NUSES (label) += 1;
1778 emit_barrier_after_bb (src);
1780 /* Mark edge as non-fallthru. */
1781 e->flags &= ~EDGE_FALLTHRU;
1785 /* Find any bb's where the fall-through edge is a crossing edge (note that
1786 these bb's must also contain a conditional jump or end with a call
1787 instruction; we've already dealt with fall-through edges for blocks
1788 that didn't have a conditional jump or didn't end with a call instruction
1789 in the call to add_labels_and_missing_jumps). Convert the fall-through
1790 edge to non-crossing edge by inserting a new bb to fall-through into.
1791 The new bb will contain an unconditional jump (crossing edge) to the
1792 original fall through destination. */
1794 static void
1795 fix_up_fall_thru_edges (void)
1797 basic_block cur_bb;
1798 basic_block new_bb;
1799 edge succ1;
1800 edge succ2;
1801 edge fall_thru;
1802 edge cond_jump = NULL;
1803 bool cond_jump_crosses;
1804 int invert_worked;
1805 rtx_insn *old_jump;
1806 rtx_code_label *fall_thru_label;
1808 FOR_EACH_BB_FN (cur_bb, cfun)
1810 fall_thru = NULL;
1811 if (EDGE_COUNT (cur_bb->succs) > 0)
1812 succ1 = EDGE_SUCC (cur_bb, 0);
1813 else
1814 succ1 = NULL;
1816 if (EDGE_COUNT (cur_bb->succs) > 1)
1817 succ2 = EDGE_SUCC (cur_bb, 1);
1818 else
1819 succ2 = NULL;
1821 /* Find the fall-through edge. */
1823 if (succ1
1824 && (succ1->flags & EDGE_FALLTHRU))
1826 fall_thru = succ1;
1827 cond_jump = succ2;
1829 else if (succ2
1830 && (succ2->flags & EDGE_FALLTHRU))
1832 fall_thru = succ2;
1833 cond_jump = succ1;
1835 else if (succ1
1836 && (block_ends_with_call_p (cur_bb)
1837 || can_throw_internal (BB_END (cur_bb))))
1839 edge e;
1840 edge_iterator ei;
1842 FOR_EACH_EDGE (e, ei, cur_bb->succs)
1843 if (e->flags & EDGE_FALLTHRU)
1845 fall_thru = e;
1846 break;
1850 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
1852 /* Check to see if the fall-thru edge is a crossing edge. */
1854 if (fall_thru->flags & EDGE_CROSSING)
1856 /* The fall_thru edge crosses; now check the cond jump edge, if
1857 it exists. */
1859 cond_jump_crosses = true;
1860 invert_worked = 0;
1861 old_jump = BB_END (cur_bb);
1863 /* Find the jump instruction, if there is one. */
1865 if (cond_jump)
1867 if (!(cond_jump->flags & EDGE_CROSSING))
1868 cond_jump_crosses = false;
1870 /* We know the fall-thru edge crosses; if the cond
1871 jump edge does NOT cross, and its destination is the
1872 next block in the bb order, invert the jump
1873 (i.e. fix it so the fall through does not cross and
1874 the cond jump does). */
1876 if (!cond_jump_crosses)
1878 /* Find label in fall_thru block. We've already added
1879 any missing labels, so there must be one. */
1881 fall_thru_label = block_label (fall_thru->dest);
1883 if (old_jump && fall_thru_label)
1885 rtx_jump_insn *old_jump_insn =
1886 dyn_cast <rtx_jump_insn *> (old_jump);
1887 if (old_jump_insn)
1888 invert_worked = invert_jump (old_jump_insn,
1889 fall_thru_label, 0);
1892 if (invert_worked)
1894 fall_thru->flags &= ~EDGE_FALLTHRU;
1895 cond_jump->flags |= EDGE_FALLTHRU;
1896 update_br_prob_note (cur_bb);
1897 std::swap (fall_thru, cond_jump);
1898 cond_jump->flags |= EDGE_CROSSING;
1899 fall_thru->flags &= ~EDGE_CROSSING;
1904 if (cond_jump_crosses || !invert_worked)
1906 /* This is the case where both edges out of the basic
1907 block are crossing edges. Here we will fix up the
1908 fall through edge. The jump edge will be taken care
1909 of later. The EDGE_CROSSING flag of fall_thru edge
1910 is unset before the call to force_nonfallthru
1911 function because if a new basic-block is created
1912 this edge remains in the current section boundary
1913 while the edge between new_bb and the fall_thru->dest
1914 becomes EDGE_CROSSING. */
1916 fall_thru->flags &= ~EDGE_CROSSING;
1917 new_bb = force_nonfallthru (fall_thru);
1919 if (new_bb)
1921 new_bb->aux = cur_bb->aux;
1922 cur_bb->aux = new_bb;
1924 /* This is done by force_nonfallthru_and_redirect. */
1925 gcc_assert (BB_PARTITION (new_bb)
1926 == BB_PARTITION (cur_bb));
1928 single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
1930 else
1932 /* If a new basic-block was not created, restore
1933 the EDGE_CROSSING flag. */
1934 fall_thru->flags |= EDGE_CROSSING;
1937 /* Add a barrier after the new jump. */
1938 emit_barrier_after_bb (new_bb ? new_bb : cur_bb);

/* This function checks the destination block of a "crossing jump" to
   see if it has any crossing predecessors that begin with a code label
   and end with an unconditional jump.  If so, it returns that predecessor
   block.  (This is to avoid creating lots of new basic blocks that all
   contain unconditional jumps to the same destination.)  */

static basic_block
find_jump_block (basic_block jump_dest)
{
  basic_block source_bb = NULL;
  edge e;
  rtx_insn *insn;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, jump_dest->preds)
    if (e->flags & EDGE_CROSSING)
      {
        basic_block src = e->src;

        /* Check each predecessor to see if it has a label, and contains
           only one executable instruction, which is an unconditional jump.
           If so, we can use it.  */

        if (LABEL_P (BB_HEAD (src)))
          for (insn = BB_HEAD (src);
               !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
               insn = NEXT_INSN (insn))
            {
              if (INSN_P (insn)
                  && insn == BB_END (src)
                  && JUMP_P (insn)
                  && !any_condjump_p (insn))
                {
                  source_bb = src;
                  break;
                }
            }

        if (source_bb)
          break;
      }

  return source_bb;
}

/* Find all BB's with conditional jumps that are crossing edges;
   insert a new bb and make the conditional jump branch to the new
   bb instead (make the new bb the same color so the conditional
   branch won't be a 'crossing' edge).  Insert an unconditional jump
   from the new bb to the original destination of the conditional
   jump.  */

static void
fix_crossing_conditional_branches (void)
{
  basic_block cur_bb;
  basic_block new_bb;
  basic_block dest;
  edge succ1;
  edge succ2;
  edge crossing_edge;
  edge new_edge;
  rtx set_src;
  rtx old_label = NULL_RTX;
  rtx_code_label *new_label;

  FOR_EACH_BB_FN (cur_bb, cfun)
    {
      crossing_edge = NULL;
      if (EDGE_COUNT (cur_bb->succs) > 0)
        succ1 = EDGE_SUCC (cur_bb, 0);
      else
        succ1 = NULL;

      if (EDGE_COUNT (cur_bb->succs) > 1)
        succ2 = EDGE_SUCC (cur_bb, 1);
      else
        succ2 = NULL;

      /* We already took care of fall-through edges, so only one successor
         can be a crossing edge.  */
      if (succ1 && (succ1->flags & EDGE_CROSSING))
        crossing_edge = succ1;
      else if (succ2 && (succ2->flags & EDGE_CROSSING))
        crossing_edge = succ2;

      if (crossing_edge)
        {
          rtx_insn *old_jump = BB_END (cur_bb);

          /* Check to make sure the jump instruction is a
             conditional jump.  */
          set_src = NULL_RTX;

          if (any_condjump_p (old_jump))
            {
              if (GET_CODE (PATTERN (old_jump)) == SET)
                set_src = SET_SRC (PATTERN (old_jump));
              else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
                {
                  set_src = XVECEXP (PATTERN (old_jump), 0, 0);
                  if (GET_CODE (set_src) == SET)
                    set_src = SET_SRC (set_src);
                  else
                    set_src = NULL_RTX;
                }
            }

          if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
            {
              rtx_jump_insn *old_jump_insn =
                as_a <rtx_jump_insn *> (old_jump);
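
              /* The jump target is whichever arm of the IF_THEN_ELSE
                 is not the fall-through (pc).  */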
              if (GET_CODE (XEXP (set_src, 1)) == PC)
                old_label = XEXP (set_src, 2);
              else if (GET_CODE (XEXP (set_src, 2)) == PC)
                old_label = XEXP (set_src, 1);

              /* Check to see if a new bb for jumping to that dest has
                 already been created; if so, use it; if not, create
                 a new one.  */
              new_bb = find_jump_block (crossing_edge->dest);

              if (new_bb)
                new_label = block_label (new_bb);
              else
                {
                  basic_block last_bb;
                  rtx_code_label *old_jump_target;
                  rtx_jump_insn *new_jump;

                  /* Create a new basic block to be the destination of the
                     conditional jump.  */

                  /* Put the appropriate instructions in the new bb.  */
                  new_label = gen_label_rtx ();
                  emit_label (new_label);

                  gcc_assert (GET_CODE (old_label) == LABEL_REF);
                  old_jump_target = old_jump_insn->jump_target ();
                  new_jump = as_a <rtx_jump_insn *>
                    (emit_jump_insn (gen_jump (old_jump_target)));
                  new_jump->set_jump_target (old_jump_target);

                  last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
                  new_bb = create_basic_block (new_label, new_jump, last_bb);
                  new_bb->aux = last_bb->aux;
                  last_bb->aux = new_bb;

                  emit_barrier_after_bb (new_bb);

                  /* Make sure the new bb is in the same partition as the
                     source of the conditional branch.  */
                  BB_COPY_PARTITION (new_bb, cur_bb);
                }

              /* Make the old jump branch to the new bb.  */
              redirect_jump (old_jump_insn, new_label, 0);

              /* Remove crossing_edge as a predecessor of 'dest'.  */
              dest = crossing_edge->dest;
              redirect_edge_succ (crossing_edge, new_bb);

              /* Make a new edge from new_bb to the old dest; the new edge
                 will be a successor for new_bb and a predecessor
                 for 'dest'.  */
              if (EDGE_COUNT (new_bb->succs) == 0)
                new_edge = make_edge (new_bb, dest, 0);
              else
                new_edge = EDGE_SUCC (new_bb, 0);

              crossing_edge->flags &= ~EDGE_CROSSING;
              new_edge->flags |= EDGE_CROSSING;
            }
        }
    }
}

/* Find any unconditional branches that cross between hot and cold
   sections.  Convert them into indirect jumps instead.  */

static void
fix_crossing_unconditional_branches (void)
{
  basic_block cur_bb;
  rtx_insn *last_insn;
  rtx label;
  rtx label_addr;
  rtx_insn *indirect_jump_sequence;
  rtx_insn *jump_insn = NULL;
  rtx new_reg;
  rtx_insn *cur_insn;
  edge succ;

  FOR_EACH_BB_FN (cur_bb, cfun)
    {
      last_insn = BB_END (cur_bb);

      if (EDGE_COUNT (cur_bb->succs) < 1)
        continue;

      succ = EDGE_SUCC (cur_bb, 0);

      /* Check to see if the bb ends in a crossing (unconditional) jump.
         At this point, no crossing jumps should be conditional.  */
      if (JUMP_P (last_insn)
          && (succ->flags & EDGE_CROSSING))
        {
          gcc_assert (!any_condjump_p (last_insn));

          /* Make sure the jump is not already an indirect or table jump.  */
          if (!computed_jump_p (last_insn)
              && !tablejump_p (last_insn, NULL, NULL))
            {
              /* We have found a "crossing" unconditional branch.  Now
                 we must convert it to an indirect jump.  First create a
                 reference to the label, as the target for the jump.  */
              label = JUMP_LABEL (last_insn);
              label_addr = gen_rtx_LABEL_REF (Pmode, label);
              LABEL_NUSES (label) += 1;

              /* Get a register to use for the indirect jump.  */
              new_reg = gen_reg_rtx (Pmode);

              /* Generate the indirect jump sequence.  */
              start_sequence ();
              emit_move_insn (new_reg, label_addr);
              emit_indirect_jump (new_reg);
              indirect_jump_sequence = get_insns ();
              end_sequence ();

              /* Make sure every instruction in the new jump sequence has
                 its basic block set to be cur_bb.  */
              for (cur_insn = indirect_jump_sequence; cur_insn;
                   cur_insn = NEXT_INSN (cur_insn))
                {
                  if (!BARRIER_P (cur_insn))
                    BLOCK_FOR_INSN (cur_insn) = cur_bb;
                  if (JUMP_P (cur_insn))
                    jump_insn = cur_insn;
                }

              /* Insert the new (indirect) jump sequence immediately before
                 the unconditional jump, then delete the unconditional
                 jump.  */
              emit_insn_before (indirect_jump_sequence, last_insn);
              delete_insn (last_insn);

              JUMP_LABEL (jump_insn) = label;
              LABEL_NUSES (label)++;

              /* Make BB_END for cur_bb be the jump instruction (NOT the
                 barrier instruction at the end of the sequence...).  */
              BB_END (cur_bb) = jump_insn;
            }
        }
    }
}
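
/* Roughly, the conversion above rewrites a direct crossing jump

        (jump_insn (set (pc) (label_ref L_cold)))

   into an address load plus an indirect jump

        (insn (set (reg tmp) (label_ref L_cold)))
        (jump_insn (set (pc) (reg tmp)))

   so the target address is materialized in a register and the branch
   can reach the other section regardless of the jump's range.  */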

/* Update CROSSING_JUMP_P flags on all jump insns.  */

static void
update_crossing_jump_flags (void)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_EACH_EDGE (e, ei, bb->succs)
      if (e->flags & EDGE_CROSSING)
        {
          if (JUMP_P (BB_END (bb))
              /* Some flags were added during fix_up_fall_thru_edges, via
                 force_nonfallthru_and_redirect.  */
              && !CROSSING_JUMP_P (BB_END (bb)))
            CROSSING_JUMP_P (BB_END (bb)) = 1;
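          /* One crossing edge is enough; move on to the next block.  */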
          break;
        }
}

/* Reorder basic blocks.  The main entry point to this file.  */

static void
reorder_basic_blocks (void)
{
  int n_traces;
  int i;
  struct trace *traces;

  gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);

  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
    return;

  set_edge_can_fallthru_flag ();
  mark_dfs_back_edges ();

  /* We estimate the length of an unconditional jump insn only once, since
     the code for getting the insn length always returns the minimal length
     now.  */
  if (uncond_jump_length == 0)
    uncond_jump_length = get_uncond_jump_length ();

  /* We need to know some information for each basic block.  */
  array_size = GET_ARRAY_SIZE (last_basic_block_for_fn (cfun));
  bbd = XNEWVEC (bbro_basic_block_data, array_size);
  for (i = 0; i < array_size; i++)
    {
      bbd[i].start_of_trace = -1;
      bbd[i].end_of_trace = -1;
      bbd[i].in_trace = -1;
      bbd[i].visited = 0;
      bbd[i].heap = NULL;
      bbd[i].node = NULL;
    }

  traces = XNEWVEC (struct trace, n_basic_blocks_for_fn (cfun));
  n_traces = 0;
  find_traces (&n_traces, traces);
  connect_traces (n_traces, traces);
  FREE (traces);
  FREE (bbd);

  relink_block_chain (/*stay_in_cfglayout_mode=*/true);

  if (dump_file)
    {
      if (dump_flags & TDF_DETAILS)
        dump_reg_info (dump_file);
      dump_flow_info (dump_file, dump_flags);
    }

  /* Signal that rtl_verify_flow_info_1 can now verify that there
     is at most one switch between hot/cold sections.  */
  crtl->bb_reorder_complete = true;
}

/* Determine which partition the first basic block in the function
   belongs to, then find the first basic block in the current function
   that belongs to a different section, and insert a
   NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
   instruction stream.  When writing out the assembly code,
   encountering this note will make the compiler switch between the
   hot and cold text sections.  */

void
insert_section_boundary_note (void)
{
  basic_block bb;
  bool switched_sections = false;
  int current_partition = 0;

  if (!crtl->has_bb_partition)
    return;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (!current_partition)
        current_partition = BB_PARTITION (bb);
      if (BB_PARTITION (bb) != current_partition)
        {
          gcc_assert (!switched_sections);
          switched_sections = true;
          emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS, BB_HEAD (bb));
          current_partition = BB_PARTITION (bb);
        }
    }
}

namespace {

const pass_data pass_data_reorder_blocks =
{
  RTL_PASS, /* type */
  "bbro", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_REORDER_BLOCKS, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_reorder_blocks : public rtl_opt_pass
{
public:
  pass_reorder_blocks (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_reorder_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      if (targetm.cannot_modify_jumps_p ())
        return false;
      return (optimize > 0
              && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
    }

  virtual unsigned int execute (function *);

}; // class pass_reorder_blocks

unsigned int
pass_reorder_blocks::execute (function *fun)
{
  basic_block bb;

  /* Last attempt to optimize CFG, as scheduling, peepholing and insn
     splitting possibly introduced more crossjumping opportunities.  */
  cfg_layout_initialize (CLEANUP_EXPENSIVE);

  reorder_basic_blocks ();
  cleanup_cfg (CLEANUP_EXPENSIVE);
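
  /* Record the chosen block order in the ->aux chain; cfg_layout_finalize
     uses that chain to emit the blocks in exactly this order.  */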
  FOR_EACH_BB_FN (bb, fun)
    if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
      bb->aux = bb->next_bb;
  cfg_layout_finalize ();

  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_reorder_blocks (gcc::context *ctxt)
{
  return new pass_reorder_blocks (ctxt);
}

/* Duplicate the blocks containing computed gotos.  This basically unfactors
   computed gotos that were factored early on in the compilation process to
   speed up edge based data flow.  We used to not unfactor them again, which
   can seriously pessimize code with many computed jumps in the source code,
   such as interpreters.  See e.g. PR15242.  */
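
/* Schematically: after early factoring, several dispatch sites share
   one block ending in the computed jump,

        bb1: ... goto bb3;    bb2: ... goto bb3;    bb3: goto *addr;

   and this pass gives each qualifying predecessor its own copy of bb3,

        bb1: ... goto *addr;  bb2: ... goto *addr;

   restoring one indirect branch per dispatch site.  */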

namespace {

const pass_data pass_data_duplicate_computed_gotos =
{
  RTL_PASS, /* type */
  "compgotos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_REORDER_BLOCKS, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_duplicate_computed_gotos : public rtl_opt_pass
{
public:
  pass_duplicate_computed_gotos (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_duplicate_computed_gotos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_duplicate_computed_gotos

bool
pass_duplicate_computed_gotos::gate (function *fun)
{
  if (targetm.cannot_modify_jumps_p ())
    return false;
  return (optimize > 0
          && flag_expensive_optimizations
          && ! optimize_function_for_size_p (fun));
}

unsigned int
pass_duplicate_computed_gotos::execute (function *fun)
{
  basic_block bb, new_bb;
  bitmap candidates;
  int max_size;
  bool changed = false;

  if (n_basic_blocks_for_fn (fun) <= NUM_FIXED_BLOCKS + 1)
    return 0;

  clear_bb_flags ();
  cfg_layout_initialize (0);

  /* We estimate the length of an unconditional jump insn only once,
     since the code for getting the insn length always returns the
     minimal length now.  */
  if (uncond_jump_length == 0)
    uncond_jump_length = get_uncond_jump_length ();
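
  /* Cap the size of a duplication candidate: a block whose insns sum
     to more than PARAM_MAX_GOTO_DUPLICATION_INSNS unconditional-jump
     lengths is considered too big to copy into its predecessors.  */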
  max_size
    = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
  candidates = BITMAP_ALLOC (NULL);

  /* Look for blocks that end in a computed jump, and see if such blocks
     are suitable for unfactoring.  If a block is a candidate for
     unfactoring, mark it in the candidates bitmap.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      rtx_insn *insn;
      edge e;
      edge_iterator ei;
      int size, all_flags;

      /* Build the reorder chain for the original order of blocks.  */
      if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
        bb->aux = bb->next_bb;

      /* Obviously the block has to end in a computed jump.  */
      if (!computed_jump_p (BB_END (bb)))
        continue;

      /* Only consider blocks that can be duplicated.  */
      if (CROSSING_JUMP_P (BB_END (bb))
          || !can_duplicate_block_p (bb))
        continue;

      /* Make sure that the block is small enough.  */
      size = 0;
      FOR_BB_INSNS (bb, insn)
        if (INSN_P (insn))
          {
            size += get_attr_min_length (insn);
            if (size > max_size)
              break;
          }
      if (size > max_size)
        continue;

      /* Final check: there must not be any incoming abnormal edges.  */
      all_flags = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
        all_flags |= e->flags;
      if (all_flags & EDGE_COMPLEX)
        continue;

      bitmap_set_bit (candidates, bb->index);
    }

  /* Nothing to do if there is no computed jump here.  */
  if (bitmap_empty_p (candidates))
    goto done;

  /* Duplicate computed gotos.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      if (bb->flags & BB_VISITED)
        continue;

      bb->flags |= BB_VISITED;

      /* BB must have one outgoing edge.  That edge must not lead to
         the exit block or the next block.
         The destination must have more than one predecessor.  */
      if (!single_succ_p (bb)
          || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (fun)
          || single_succ (bb) == bb->next_bb
          || single_pred_p (single_succ (bb)))
        continue;

      /* The successor block has to be a duplication candidate.  */
      if (!bitmap_bit_p (candidates, single_succ (bb)->index))
        continue;

      /* Don't duplicate a partition crossing edge, which requires difficult
         fixup.  */
      if (JUMP_P (BB_END (bb)) && CROSSING_JUMP_P (BB_END (bb)))
        continue;

      new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
      new_bb->aux = bb->aux;
      bb->aux = new_bb;
      new_bb->flags |= BB_VISITED;
      changed = true;
    }

 done:
  if (changed)
    {
      /* Duplicating blocks above will redirect edges and may cause hot
         blocks previously reached by both hot and cold blocks to become
         dominated only by cold blocks.  */
      fixup_partitions ();

      /* Merge the duplicated blocks into predecessors, when possible.  */
      cfg_layout_finalize ();
      cleanup_cfg (0);
    }
  else
    cfg_layout_finalize ();

  BITMAP_FREE (candidates);
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_duplicate_computed_gotos (gcc::context *ctxt)
{
  return new pass_duplicate_computed_gotos (ctxt);
}

/* This function is the main 'entrance' for the optimization that
   partitions hot and cold basic blocks into separate sections of the
   .o file (to improve performance and cache locality).  Ideally it
   would be called after all optimizations that rearrange the CFG have
   been called.  However part of this optimization may introduce new
   register usage, so it must be called before register allocation has
   occurred.  This means that this optimization is actually called
   well before the optimization that reorders basic blocks (see
   function above).

   This optimization checks the feedback information to determine
   which basic blocks are hot/cold, and updates flags on the basic
   blocks to indicate which section they belong in.  This information
   is later used for writing out sections in the .o file.  Because hot
   and cold sections can be arbitrarily large (within the bounds of
   memory), far beyond the size of a single function, it is necessary
   to fix up all edges that cross section boundaries, to make sure the
   instructions used can actually span the required distance.  The
   fixes are described below.

   Fall-through edges must be changed into jumps; it is not safe or
   legal to fall through across a section boundary.  Whenever a
   fall-through edge crossing a section boundary is encountered, a new
   basic block is inserted (in the same section as the fall-through
   source), and the fall-through edge is redirected to the new basic
   block.  The new basic block contains an unconditional jump to the
   original fall-through target.  (If the unconditional jump is
   insufficient to cross section boundaries, that is dealt with a
   little later, see below.)

   In order to deal with architectures that have short conditional
   branches (which cannot span all of memory) we take any conditional
   jump that attempts to cross a section boundary and add a level of
   indirection: it becomes a conditional jump to a new basic block, in
   the same section.  The new basic block contains an unconditional
   jump to the original target, in the other section.

   For those architectures whose unconditional branch is also
   incapable of reaching all of memory, those unconditional jumps are
   converted into indirect jumps, through a register.

   IMPORTANT NOTE: This optimization causes some messy interactions
   with the cfg cleanup optimizations; those optimizations want to
   merge blocks wherever possible, and to collapse indirect jump
   sequences (change "A jumps to B jumps to C" directly into "A jumps
   to C").  Those optimizations can undo the jump fixes that
   partitioning is required to make (see above), in order to ensure
   that jumps attempting to cross section boundaries are really able
   to cover whatever distance the jump requires (on many architectures
   conditional or unconditional jumps are not able to reach all of
   memory).  Therefore tests have to be inserted into each such
   optimization to make sure that it does not undo stuff necessary to
   cross partition boundaries.  This would be much less of a problem
   if we could perform this optimization later in the compilation, but
   unfortunately the fact that we may need to create indirect jumps
   (through registers) requires that this optimization be performed
   before register allocation.

   Hot and cold basic blocks are partitioned and put in separate
   sections of the .o file, to reduce paging and improve cache
   performance (hopefully).  This can result in bits of code from the
   same function being widely separated in the .o file.  However this
   is not reflected in the current bb structure.  Therefore we must
   take care to ensure that: 1) there are no fall_thru edges that
   cross between sections; 2) for those architectures which have
   "short" conditional branches, all conditional branches that attempt
   to cross between sections are converted to unconditional branches;
   and 3) for those architectures which have "short" unconditional
   branches, all unconditional branches that attempt to cross between
   sections are converted to indirect jumps.

   The code for fixing up fall_thru edges that cross between hot and
   cold basic blocks does so by creating new basic blocks containing
   unconditional branches to the appropriate label in the "other"
   section.  The new basic block is then put in the same (hot or cold)
   section as the original conditional branch, and the fall_thru edge
   is modified to fall into the new basic block instead.  By adding
   this level of indirection we end up with only unconditional branches
   crossing between hot and cold sections.

   Conditional branches are dealt with by adding a level of indirection.
   A new basic block is added in the same (hot/cold) section as the
   conditional branch, and the conditional branch is retargeted to the
   new basic block.  The new basic block contains an unconditional branch
   to the original target of the conditional branch (in the other section).

   Unconditional branches are dealt with by converting them into
   indirect jumps.  */
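
/* As an illustrative sketch (schematic, not any particular target),
   a crossing fall-through

        hot:   bb_A            (falls through to bb_B in the cold section)
        cold:  bb_B

   becomes

        hot:   bb_A
               bb_N: jump L_B  (new block, kept in the hot section)
        cold:  L_B: bb_B

   leaving only explicit jumps crossing the boundary, which the later
   fixes can then lengthen as needed.  */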

namespace {

const pass_data pass_data_partition_blocks =
{
  RTL_PASS, /* type */
  "bbpart", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_REORDER_BLOCKS, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_partition_blocks : public rtl_opt_pass
{
public:
  pass_partition_blocks (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_partition_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_partition_blocks

bool
pass_partition_blocks::gate (function *fun)
{
  /* The optimization to partition hot/cold basic blocks into separate
     sections of the .o file does not work well with linkonce or with
     user defined section attributes.  Don't call it if either case
     arises.  */
  return (flag_reorder_blocks_and_partition
          && optimize
          /* See pass_reorder_blocks::gate.  We should not partition if
             we are going to omit the reordering.  */
          && optimize_function_for_speed_p (fun)
          && !DECL_COMDAT_GROUP (current_function_decl)
          && !user_defined_section_attribute);
}

unsigned
pass_partition_blocks::execute (function *fun)
{
  vec<edge> crossing_edges;

  if (n_basic_blocks_for_fn (fun) <= NUM_FIXED_BLOCKS + 1)
    return 0;

  df_set_flags (DF_DEFER_INSN_RESCAN);

  crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
  if (!crossing_edges.exists ())
    return 0;

  crtl->has_bb_partition = true;

  /* Make sure the source of any crossing edge ends in a jump and the
     destination of any crossing edge has a label.  */
  add_labels_and_missing_jumps (crossing_edges);

  /* Convert all crossing fall_thru edges to non-crossing fall-thrus
     by adding unconditional jumps (that jump to the original
     fall-through dest).  */
  fix_up_fall_thru_edges ();

  /* If the architecture does not have conditional branches that can
     span all of memory, convert crossing conditional branches into
     crossing unconditional branches.  */
  if (!HAS_LONG_COND_BRANCH)
    fix_crossing_conditional_branches ();

  /* If the architecture does not have unconditional branches that
     can span all of memory, convert crossing unconditional branches
     into indirect jumps.  Since adding an indirect jump also adds
     a new register usage, update the register usage information as
     well.  */
  if (!HAS_LONG_UNCOND_BRANCH)
    fix_crossing_unconditional_branches ();

  update_crossing_jump_flags ();

  /* Clear bb->aux fields that the above routines were using.  */
  clear_aux_for_blocks ();

  crossing_edges.release ();

  /* ??? FIXME: DF generates the bb info for a block immediately.
     And by immediately, I mean *during* creation of the block.

        #0  df_bb_refs_collect
        #1  in df_bb_refs_record
        #2  in create_basic_block_structure

     Which means that the bb_has_eh_pred test in df_bb_refs_collect
     will *always* fail, because no edges can have been added to the
     block yet.  Which of course means we don't add the right
     artificial refs, which means we fail df_verify (much) later.

     Cleanest solution would seem to make DF_DEFER_INSN_RESCAN imply
     that we also shouldn't grab data from the new blocks those new
     insns are in either.  In this way one can create the block, link
     it up properly, and have everything Just Work later, when deferred
     insns are processed.

     In the meantime, we have no other option but to throw away all
     of the DF data and recompute it all.  */
  if (fun->eh->lp_array)
    {
      df_finish_pass (true);
      df_scan_alloc (NULL);
      df_scan_blocks ();
      /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
         data.  We blindly generated all of them when creating the new
         landing pad.  Delete those assignments we don't use.  */
      df_set_flags (DF_LR_RUN_DCE);
      df_analyze ();
    }

  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_partition_blocks (gcc::context *ctxt)
{
  return new pass_partition_blocks (ctxt);
}