PR optimization/15242
[official-gcc.git] / gcc / bb-reorder.c
1 /* Basic block reordering routines for the GNU compiler.
2 Copyright (C) 2000, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
21 /* This (greedy) algorithm constructs traces in several rounds.
22 The construction starts from "seeds". The seed for the first round
23 is the entry point of the function. When there is more than one seed,
24 the one with the lowest key in the heap is selected first
25 (see function bb_to_key). Then the algorithm repeatedly adds the most
26 probable successor to the end of a trace. Finally it connects the traces.
28 There are two parameters: Branch Threshold and Exec Threshold.
29 If the probability of the edge to a successor of the current basic block is
30 lower than Branch Threshold, or the frequency of the successor is lower than
31 Exec Threshold, the successor will be a seed in one of the next rounds.
32 Each round has these parameters lower than the previous one.
33 The last round has to have these parameters set to zero
34 so that the remaining blocks are picked up.
36 The algorithm selects the most probable successor from all unvisited
37 successors and successors that have been added to this trace.
38 The other successors (those that have not been "sent" to the next round) will be
39 other seeds for this round, and the secondary traces will start from them.
40 If the successor has not been visited in this trace, it is added to the trace
41 (however, there is some heuristic for simple branches).
42 If the successor has been visited in this trace, a loop has been found.
43 If the loop has many iterations, the loop is rotated so that the
44 source block of the most probable edge going out from the loop
45 is the last block of the trace.
46 If the loop has few iterations and there is no edge from the last block of
47 the loop going out of the loop, the loop header is duplicated.
48 Finally, the construction of the trace is terminated.
50 When connecting traces, it first checks whether there is an edge from the
51 last block of one trace to the first block of another trace.
52 When there are still some unconnected traces, it checks whether there exists
53 a basic block BB such that BB is a successor of the last bb of one trace
54 and BB is a predecessor of the first block of another trace. In this case,
55 BB is duplicated and the traces are connected through this duplicate.
56 The remaining traces are simply connected, so there will be a jump to the
57 beginning of each remaining trace.
60 References:
62 "Software Trace Cache"
63 A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
64 http://citeseer.nj.nec.com/15361.html
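   An illustrative walk-through of the rounds (mine, not from the paper):
   suppose block A branches to B with probability 80% and to C with 20%, and
   both B and C fall through to D.  In round 0 the heap is seeded with A; the
   trace grows A -> B (the most probable successor) and then B -> D.  The
   edge A -> C falls below round 0's 40% branch threshold, so C is pushed to
   a later round, becomes a seed there, forms a trace of its own, and is
   finally placed after D when the traces are connected.  */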
68 #include "config.h"
69 #include "system.h"
70 #include "coretypes.h"
71 #include "tm.h"
72 #include "rtl.h"
73 #include "regs.h"
74 #include "flags.h"
75 #include "timevar.h"
76 #include "output.h"
77 #include "cfglayout.h"
78 #include "fibheap.h"
79 #include "target.h"
80 #include "function.h"
81 #include "tm_p.h"
82 #include "obstack.h"
83 #include "expr.h"
84 #include "params.h"
86 /* The number of rounds. In most cases there will only be 4 rounds, but
87 when partitioning hot and cold basic blocks into separate sections of
88 the .o file there will be an extra round.*/
89 #define N_ROUNDS 5
91 /* Stubs in case we don't have a return insn.
92 We have to check at runtime too, not only at compile time. */
94 #ifndef HAVE_return
95 #define HAVE_return 0
96 #define gen_return() NULL_RTX
97 #endif
100 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
101 static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
103 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
104 static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
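/* For orientation (reading the two tables above together with their use in
   find_traces below): in round 0 an edge must have a probability of at least
   40% of REG_BR_PROB_BASE and its destination a frequency/count of at least
   50% of the hottest entry block in order to extend the current trace; by
   round 3 both thresholds are zero, so every remaining block is accepted.  */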
106 /* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of the entry
107 block's frequency, the edge destination is not duplicated while connecting traces. */
108 #define DUPLICATION_THRESHOLD 100
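/* In other words (an inference from how connect_traces uses these thresholds
   below): when a connecting edge carries less than 10% of the entry block's
   frequency or count, only blocks no larger than an unconditional jump are
   still considered for duplication.  */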
110 /* Length of unconditional jump instruction. */
111 static int uncond_jump_length;
113 /* Structure to hold needed information for each basic block. */
114 typedef struct bbro_basic_block_data_def
116 /* Which trace is the bb start of (-1 means it is not a start of a trace). */
117 int start_of_trace;
119 /* Which trace is the bb end of (-1 means it is not an end of a trace). */
120 int end_of_trace;
122 /* Which heap is BB in (if any)? */
123 fibheap_t heap;
125 /* Which heap node is BB in (if any)? */
126 fibnode_t node;
127 } bbro_basic_block_data;
129 /* The current size of the following dynamic array. */
130 static int array_size;
132 /* The array which holds needed information for basic blocks. */
133 static bbro_basic_block_data *bbd;
135 /* To avoid frequent reallocation the size of the arrays is greater than needed;
136 the number of elements is (not less than) 1.25 * size_wanted. */
137 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
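/* A quick illustrative calculation: GET_ARRAY_SIZE (100) is
   ((100 / 4) + 1) * 5 == 130, i.e. at least 1.25 * 100, matching the
   comment above.  */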
139 /* Free the memory and set the pointer to NULL. */
140 #define FREE(P) (gcc_assert (P), free (P), P = 0)
142 /* Structure for holding information about a trace. */
143 struct trace
145 /* First and last basic block of the trace. */
146 basic_block first, last;
148 /* The round of STC creation in which this trace was found. */
149 int round;
151 /* The length (i.e. the number of basic blocks) of the trace. */
152 int length;
155 /* Maximum frequency and count of one of the entry blocks. */
156 int max_entry_frequency;
157 gcov_type max_entry_count;
159 /* Local function prototypes. */
160 static void find_traces (int *, struct trace *);
161 static basic_block rotate_loop (edge, struct trace *, int);
162 static void mark_bb_visited (basic_block, int);
163 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
164 int, fibheap_t *, int);
165 static basic_block copy_bb (basic_block, edge, basic_block, int);
166 static fibheapkey_t bb_to_key (basic_block);
167 static bool better_edge_p (basic_block, edge, int, int, int, int, edge);
168 static void connect_traces (int, struct trace *);
169 static bool copy_bb_p (basic_block, int);
170 static int get_uncond_jump_length (void);
171 static bool push_to_next_round_p (basic_block, int, int, int, gcov_type);
172 static void add_unlikely_executed_notes (void);
173 static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *,
174 int *,
175 int *);
176 static void mark_bb_for_unlikely_executed_section (basic_block);
177 static void add_labels_and_missing_jumps (edge *, int);
178 static void add_reg_crossing_jump_notes (void);
179 static void fix_up_fall_thru_edges (void);
180 static void fix_edges_for_rarely_executed_code (edge *, int);
181 static void fix_crossing_conditional_branches (void);
182 static void fix_crossing_unconditional_branches (void);
184 /* Check to see if bb should be pushed into the next round of trace
185 collections or not. Reasons for pushing the block forward are: 1) the
186 block is cold, we are doing partitioning, and there will be
187 another round (cold partition blocks are not supposed to be
188 collected into traces until the very last round); or 2) there will
189 be another round, and the basic block is not "hot enough" for the
190 current round of trace collection. */
192 static bool
193 push_to_next_round_p (basic_block bb, int round, int number_of_rounds,
194 int exec_th, gcov_type count_th)
196 bool there_exists_another_round;
197 bool cold_block;
198 bool block_not_hot_enough;
199 bool next_round_is_last;
201 there_exists_another_round = round < number_of_rounds - 1;
202 next_round_is_last = round + 1 == number_of_rounds - 1;
204 cold_block = (flag_reorder_blocks_and_partition
205 && BB_PARTITION (bb) == BB_COLD_PARTITION);
207 block_not_hot_enough = (bb->frequency < exec_th
208 || bb->count < count_th
209 || probably_never_executed_bb_p (bb));
211 if (flag_reorder_blocks_and_partition
212 && next_round_is_last
213 && BB_PARTITION (bb) != BB_COLD_PARTITION)
214 return false;
215 else if (there_exists_another_round
216 && (cold_block || block_not_hot_enough))
217 return true;
218 else
219 return false;
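/* A worked example of the logic above (assuming flag_reorder_blocks_and_partition
   is set, so number_of_rounds == N_ROUNDS == 5): a cold-partition block seen in
   rounds 0..3 keeps returning true and is pushed forward until round 4, the
   cold-only round, where it is finally collected; a hot block in round 3 hits
   the first test (the next round is the last, cold-only one) and returns false
   even if its frequency is below EXEC_TH, so it can never leak into the cold
   section's round.  */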
222 /* Find the traces for Software Trace Cache. Chain each trace through
223 RBI()->next. Store the number of traces to N_TRACES and description of
224 traces to TRACES. */
226 static void
227 find_traces (int *n_traces, struct trace *traces)
229 int i;
230 int number_of_rounds;
231 edge e;
232 edge_iterator ei;
233 fibheap_t heap;
235 /* Add one extra round of trace collection when partitioning hot/cold
236 basic blocks into separate sections. The last round is for all the
237 cold blocks (and ONLY the cold blocks). */
239 number_of_rounds = N_ROUNDS - 1;
240 if (flag_reorder_blocks_and_partition)
241 number_of_rounds = N_ROUNDS;
243 /* Insert entry points of function into heap. */
244 heap = fibheap_new ();
245 max_entry_frequency = 0;
246 max_entry_count = 0;
247 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
249 bbd[e->dest->index].heap = heap;
250 bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
251 e->dest);
252 if (e->dest->frequency > max_entry_frequency)
253 max_entry_frequency = e->dest->frequency;
254 if (e->dest->count > max_entry_count)
255 max_entry_count = e->dest->count;
258 /* Find the traces. */
259 for (i = 0; i < number_of_rounds; i++)
261 gcov_type count_threshold;
263 if (dump_file)
264 fprintf (dump_file, "STC - round %d\n", i + 1);
266 if (max_entry_count < INT_MAX / 1000)
267 count_threshold = max_entry_count * exec_threshold[i] / 1000;
268 else
269 count_threshold = max_entry_count / 1000 * exec_threshold[i];
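/* Illustrative arithmetic for the split above: with exec_threshold[i] == 500
   and max_entry_count == 5000000 (already above INT_MAX / 1000), the second
   form computes 5000000 / 1000 * 500 == 2500000, whereas the first form
   would create the intermediate product 2500000000, which no longer fits in
   a 32-bit int; presumably that is the overflow the guard avoids.  */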
271 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
272 max_entry_frequency * exec_threshold[i] / 1000,
273 count_threshold, traces, n_traces, i, &heap,
274 number_of_rounds);
276 fibheap_delete (heap);
278 if (dump_file)
280 for (i = 0; i < *n_traces; i++)
282 basic_block bb;
283 fprintf (dump_file, "Trace %d (round %d): ", i + 1,
284 traces[i].round + 1);
285 for (bb = traces[i].first; bb != traces[i].last; bb = bb->rbi->next)
286 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
287 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
289 fflush (dump_file);
293 /* Rotate the loop whose back edge is BACK_EDGE in the tail of trace TRACE
294 (with sequence number TRACE_N). */
296 static basic_block
297 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
299 basic_block bb;
301 /* Information about the best end (end after rotation) of the loop. */
302 basic_block best_bb = NULL;
303 edge best_edge = NULL;
304 int best_freq = -1;
305 gcov_type best_count = -1;
306 /* The best edge is preferred when its destination is not visited yet
307 or is a start block of some trace. */
308 bool is_preferred = false;
310 /* Find the most frequent edge that goes out from current trace. */
311 bb = back_edge->dest;
314 edge e;
315 edge_iterator ei;
317 FOR_EACH_EDGE (e, ei, bb->succs)
318 if (e->dest != EXIT_BLOCK_PTR
319 && e->dest->rbi->visited != trace_n
320 && (e->flags & EDGE_CAN_FALLTHRU)
321 && !(e->flags & EDGE_COMPLEX))
323 if (is_preferred)
325 /* The best edge is preferred. */
326 if (!e->dest->rbi->visited
327 || bbd[e->dest->index].start_of_trace >= 0)
329 /* The current edge E is also preferred. */
330 int freq = EDGE_FREQUENCY (e);
331 if (freq > best_freq || e->count > best_count)
333 best_freq = freq;
334 best_count = e->count;
335 best_edge = e;
336 best_bb = bb;
340 else
342 if (!e->dest->rbi->visited
343 || bbd[e->dest->index].start_of_trace >= 0)
345 /* The current edge E is preferred. */
346 is_preferred = true;
347 best_freq = EDGE_FREQUENCY (e);
348 best_count = e->count;
349 best_edge = e;
350 best_bb = bb;
352 else
354 int freq = EDGE_FREQUENCY (e);
355 if (!best_edge || freq > best_freq || e->count > best_count)
357 best_freq = freq;
358 best_count = e->count;
359 best_edge = e;
360 best_bb = bb;
365 bb = bb->rbi->next;
367 while (bb != back_edge->dest);
369 if (best_bb)
371 /* Rotate the loop so that the BEST_EDGE goes out from the last block of
372 the trace. */
373 if (back_edge->dest == trace->first)
375 trace->first = best_bb->rbi->next;
377 else
379 basic_block prev_bb;
381 for (prev_bb = trace->first;
382 prev_bb->rbi->next != back_edge->dest;
383 prev_bb = prev_bb->rbi->next)
385 prev_bb->rbi->next = best_bb->rbi->next;
387 /* Try to get rid of uncond jump to cond jump. */
388 if (EDGE_COUNT (prev_bb->succs) == 1)
390 basic_block header = EDGE_SUCC (prev_bb, 0)->dest;
392 /* Duplicate HEADER if it is a small block containing a cond jump
393 at the end. */
394 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
395 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
396 NULL_RTX))
398 copy_bb (header, EDGE_SUCC (prev_bb, 0), prev_bb, trace_n);
403 else
405 /* We have not found a suitable loop tail, so do no rotation. */
406 best_bb = back_edge->src;
408 best_bb->rbi->next = NULL;
409 return best_bb;
412 /* This function marks BB as visited in trace number TRACE. */
414 static void
415 mark_bb_visited (basic_block bb, int trace)
417 bb->rbi->visited = trace;
418 if (bbd[bb->index].heap)
420 fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
421 bbd[bb->index].heap = NULL;
422 bbd[bb->index].node = NULL;
426 /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH, i.e. do
427 not include into traces basic blocks whose probability is lower than BRANCH_TH,
428 whose frequency is lower than EXEC_TH, or whose count is lower than
429 COUNT_TH. It stores the new traces into TRACES and modifies the number of
430 traces *N_TRACES. Sets the round (which the trace belongs to) to ROUND. It
431 expects that starting basic blocks are in *HEAP and at the end it deletes
432 *HEAP and stores starting points for the next round into new *HEAP. */
434 static void
435 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
436 struct trace *traces, int *n_traces, int round,
437 fibheap_t *heap, int number_of_rounds)
439 /* The following variable refers to the last round in which non-"cold"
440 blocks may be collected into a trace. */
442 int last_round = N_ROUNDS - 1;
444 /* Heap for discarded basic blocks which are possible starting points for
445 the next round. */
446 fibheap_t new_heap = fibheap_new ();
448 while (!fibheap_empty (*heap))
450 basic_block bb;
451 struct trace *trace;
452 edge best_edge, e;
453 fibheapkey_t key;
454 edge_iterator ei;
456 bb = fibheap_extract_min (*heap);
457 bbd[bb->index].heap = NULL;
458 bbd[bb->index].node = NULL;
460 if (dump_file)
461 fprintf (dump_file, "Getting bb %d\n", bb->index);
463 /* If the BB's frequency is too low send BB to the next round. When
464 partitioning hot/cold blocks into separate sections, make sure all
465 the cold blocks (and ONLY the cold blocks) go into the (extra) final
466 round. */
468 if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
469 count_th))
471 int key = bb_to_key (bb);
472 bbd[bb->index].heap = new_heap;
473 bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
475 if (dump_file)
476 fprintf (dump_file,
477 " Possible start point of next round: %d (key: %d)\n",
478 bb->index, key);
479 continue;
482 trace = traces + *n_traces;
483 trace->first = bb;
484 trace->round = round;
485 trace->length = 0;
486 (*n_traces)++;
490 int prob, freq;
492 /* The probability and frequency of the best edge. */
493 int best_prob = INT_MIN / 2;
494 int best_freq = INT_MIN / 2;
496 best_edge = NULL;
497 mark_bb_visited (bb, *n_traces);
498 trace->length++;
500 if (dump_file)
501 fprintf (dump_file, "Basic block %d was visited in trace %d\n",
502 bb->index, *n_traces - 1);
504 /* Select the successor that will be placed after BB. */
505 FOR_EACH_EDGE (e, ei, bb->succs)
507 gcc_assert (!(e->flags & EDGE_FAKE));
509 if (e->dest == EXIT_BLOCK_PTR)
510 continue;
512 if (e->dest->rbi->visited
513 && e->dest->rbi->visited != *n_traces)
514 continue;
516 if (BB_PARTITION (e->dest) == BB_COLD_PARTITION
517 && round < last_round)
518 continue;
520 prob = e->probability;
521 freq = EDGE_FREQUENCY (e);
523 /* Edge that cannot be a fallthru edge, or an improbable or infrequent
524 successor (i.e. an unsuitable successor). */
525 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
526 || prob < branch_th || freq < exec_th || e->count < count_th)
527 continue;
529 /* If partitioning hot/cold basic blocks, don't consider edges
530 that cross section boundaries. */
532 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
533 best_edge))
535 best_edge = e;
536 best_prob = prob;
537 best_freq = freq;
541 /* If the best destination has multiple predecessors, and can be
542 duplicated cheaper than a jump, don't allow it to be added
543 to a trace. We'll duplicate it when connecting traces. */
544 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
545 && copy_bb_p (best_edge->dest, 0))
546 best_edge = NULL;
548 /* Add all non-selected successors to the heaps. */
549 FOR_EACH_EDGE (e, ei, bb->succs)
551 if (e == best_edge
552 || e->dest == EXIT_BLOCK_PTR
553 || e->dest->rbi->visited)
554 continue;
556 key = bb_to_key (e->dest);
558 if (bbd[e->dest->index].heap)
560 /* E->DEST is already in some heap. */
561 if (key != bbd[e->dest->index].node->key)
563 if (dump_file)
565 fprintf (dump_file,
566 "Changing key for bb %d from %ld to %ld.\n",
567 e->dest->index,
568 (long) bbd[e->dest->index].node->key,
569 key);
571 fibheap_replace_key (bbd[e->dest->index].heap,
572 bbd[e->dest->index].node, key);
575 else
577 fibheap_t which_heap = *heap;
579 prob = e->probability;
580 freq = EDGE_FREQUENCY (e);
582 if (!(e->flags & EDGE_CAN_FALLTHRU)
583 || (e->flags & EDGE_COMPLEX)
584 || prob < branch_th || freq < exec_th
585 || e->count < count_th)
587 /* When partitioning hot/cold basic blocks, make sure
588 the cold blocks (and only the cold blocks) all get
589 pushed to the last round of trace collection. */
591 if (push_to_next_round_p (e->dest, round,
592 number_of_rounds,
593 exec_th, count_th))
594 which_heap = new_heap;
597 bbd[e->dest->index].heap = which_heap;
598 bbd[e->dest->index].node = fibheap_insert (which_heap,
599 key, e->dest);
601 if (dump_file)
603 fprintf (dump_file,
604 " Possible start of %s round: %d (key: %ld)\n",
605 (which_heap == new_heap) ? "next" : "this",
606 e->dest->index, (long) key);
612 if (best_edge) /* Suitable successor was found. */
614 if (best_edge->dest->rbi->visited == *n_traces)
616 /* We do nothing with one-basic-block loops. */
617 if (best_edge->dest != bb)
619 if (EDGE_FREQUENCY (best_edge)
620 > 4 * best_edge->dest->frequency / 5)
622 /* The loop has at least 4 iterations. If the loop
623 header is not the first block of the function
624 we can rotate the loop. */
626 if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
628 if (dump_file)
630 fprintf (dump_file,
631 "Rotating loop %d - %d\n",
632 best_edge->dest->index, bb->index);
634 bb->rbi->next = best_edge->dest;
635 bb = rotate_loop (best_edge, trace, *n_traces);
638 else
640 /* The loop has less than 4 iterations. */
642 if (EDGE_COUNT (bb->succs) == 1
643 && copy_bb_p (best_edge->dest, !optimize_size))
645 bb = copy_bb (best_edge->dest, best_edge, bb,
646 *n_traces);
651 /* Terminate the trace. */
652 break;
654 else
656 /* Check for a situation where block A has two successors B and C
657 (edges A -> B and A -> C) and B's only successor is C (edge B -> C),
664 and where
665 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
666 >= EDGE_FREQUENCY (AC)
667 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC)).
668 Best ordering is then A B C.
670 This situation is created for example by:
672 if (A) B;
673 C;
675 */
677 FOR_EACH_EDGE (e, ei, bb->succs)
678 if (e != best_edge
679 && (e->flags & EDGE_CAN_FALLTHRU)
680 && !(e->flags & EDGE_COMPLEX)
681 && !e->dest->rbi->visited
682 && EDGE_COUNT (e->dest->preds) == 1
683 && !(e->flags & EDGE_CROSSING)
684 && EDGE_COUNT (e->dest->succs) == 1
685 && (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU)
686 && !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX)
687 && EDGE_SUCC (e->dest, 0)->dest == best_edge->dest
688 && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
690 best_edge = e;
691 if (dump_file)
692 fprintf (dump_file, "Selecting BB %d\n",
693 best_edge->dest->index);
694 break;
697 bb->rbi->next = best_edge->dest;
698 bb = best_edge->dest;
702 while (best_edge);
703 trace->last = bb;
704 bbd[trace->first->index].start_of_trace = *n_traces - 1;
705 bbd[trace->last->index].end_of_trace = *n_traces - 1;
707 /* The trace is terminated so we have to recount the keys in heap
708 (some block can have a lower key because now one of its predecessors
709 is an end of the trace). */
710 FOR_EACH_EDGE (e, ei, bb->succs)
712 if (e->dest == EXIT_BLOCK_PTR
713 || e->dest->rbi->visited)
714 continue;
716 if (bbd[e->dest->index].heap)
718 key = bb_to_key (e->dest);
719 if (key != bbd[e->dest->index].node->key)
721 if (dump_file)
723 fprintf (dump_file,
724 "Changing key for bb %d from %ld to %ld.\n",
725 e->dest->index,
726 (long) bbd[e->dest->index].node->key, key);
728 fibheap_replace_key (bbd[e->dest->index].heap,
729 bbd[e->dest->index].node,
730 key);
736 fibheap_delete (*heap);
738 /* "Return" the new heap. */
739 *heap = new_heap;
742 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
743 it to the trace after BB, mark the new copy as visited and update the pass'
744 data structures (TRACE is the number of the trace to which OLD_BB is duplicated). */
746 static basic_block
747 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
749 basic_block new_bb;
751 new_bb = duplicate_block (old_bb, e);
752 BB_COPY_PARTITION (new_bb, old_bb);
754 gcc_assert (e->dest == new_bb);
755 gcc_assert (!e->dest->rbi->visited);
757 if (dump_file)
758 fprintf (dump_file,
759 "Duplicated bb %d (created bb %d)\n",
760 old_bb->index, new_bb->index);
761 new_bb->rbi->visited = trace;
762 new_bb->rbi->next = bb->rbi->next;
763 bb->rbi->next = new_bb;
765 if (new_bb->index >= array_size || last_basic_block > array_size)
767 int i;
768 int new_size;
770 new_size = MAX (last_basic_block, new_bb->index + 1);
771 new_size = GET_ARRAY_SIZE (new_size);
772 bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data));
773 for (i = array_size; i < new_size; i++)
775 bbd[i].start_of_trace = -1;
776 bbd[i].end_of_trace = -1;
777 bbd[i].heap = NULL;
778 bbd[i].node = NULL;
780 array_size = new_size;
782 if (dump_file)
784 fprintf (dump_file,
785 "Growing the dynamic array to %d elements.\n",
786 array_size);
790 return new_bb;
793 /* Compute and return the key (for the heap) of the basic block BB. */
795 static fibheapkey_t
796 bb_to_key (basic_block bb)
798 edge e;
799 edge_iterator ei;
800 int priority = 0;
802 /* Do not start in probably never executed blocks. */
804 if (BB_PARTITION (bb) == BB_COLD_PARTITION
805 || probably_never_executed_bb_p (bb))
806 return BB_FREQ_MAX;
808 /* Prefer blocks whose predecessor is an end of some trace
809 or whose predecessor edge is EDGE_DFS_BACK. */
810 FOR_EACH_EDGE (e, ei, bb->preds)
812 if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
813 || (e->flags & EDGE_DFS_BACK))
815 int edge_freq = EDGE_FREQUENCY (e);
817 if (edge_freq > priority)
818 priority = edge_freq;
822 if (priority)
823 /* The block with priority should have significantly lower key. */
824 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
825 return -bb->frequency;
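/* Illustrative key values (assuming BB_FREQ_MAX == 10000): a block with
   frequency 400 and no "preferred" predecessor gets key -400; the same block
   whose predecessor ends some trace via an edge of frequency 300 gets key
   -(100 * 10000 + 100 * 300 + 400) == -1030400, so the fibonacci heap hands
   it out much earlier.  */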
828 /* Return true when the edge E from basic block BB is better than the temporary
829 best edge (details are in the function). The probability of edge E is PROB. The
830 frequency of the successor is FREQ. The current best probability is
831 BEST_PROB, the best frequency is BEST_FREQ.
832 The edge is considered to be equivalent when PROB does not differ much from
833 BEST_PROB; similarly for frequency. */
835 static bool
836 better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob,
837 int best_freq, edge cur_best_edge)
839 bool is_better_edge;
841 /* The BEST_* values do not have to be best, but can be a bit smaller than
842 maximum values. */
843 int diff_prob = best_prob / 10;
844 int diff_freq = best_freq / 10;
846 if (prob > best_prob + diff_prob)
847 /* The edge has higher probability than the temporary best edge. */
848 is_better_edge = true;
849 else if (prob < best_prob - diff_prob)
850 /* The edge has lower probability than the temporary best edge. */
851 is_better_edge = false;
852 else if (freq < best_freq - diff_freq)
853 /* The edge and the temporary best edge have almost equivalent
854 probabilities. The higher frequency of a successor now means
855 that there is another edge going into that successor.
856 This successor has lower frequency so it is better. */
857 is_better_edge = true;
858 else if (freq > best_freq + diff_freq)
859 /* This successor has higher frequency so it is worse. */
860 is_better_edge = false;
861 else if (e->dest->prev_bb == bb)
862 /* The edges have equivalent probabilities and the successors
863 have equivalent frequencies. Select the previous successor. */
864 is_better_edge = true;
865 else
866 is_better_edge = false;
868 /* If we are doing hot/cold partitioning, make sure that we always favor
869 non-crossing edges over crossing edges. */
871 if (!is_better_edge
872 && flag_reorder_blocks_and_partition
873 && cur_best_edge
874 && (cur_best_edge->flags & EDGE_CROSSING)
875 && !(e->flags & EDGE_CROSSING))
876 is_better_edge = true;
878 return is_better_edge;
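/* Illustrative numbers for the tolerance above: if BEST_PROB is 5000 then
   diff_prob is 500, so any PROB between 4500 and 5500 (inclusive) counts as
   "equivalent" and the decision falls through to the successor frequencies;
   only a probability above 5500 wins outright on probability alone.  */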
881 /* Connect traces in array TRACES, N_TRACES is the count of traces. */
883 static void
884 connect_traces (int n_traces, struct trace *traces)
886 int i;
887 int unconnected_hot_trace_count = 0;
888 bool cold_connected = true;
889 bool *connected;
890 bool *cold_traces;
891 int last_trace;
892 int freq_threshold;
893 gcov_type count_threshold;
895 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
896 if (max_entry_count < INT_MAX / 1000)
897 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
898 else
899 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
901 connected = xcalloc (n_traces, sizeof (bool));
902 last_trace = -1;
904 /* If we are partitioning hot/cold basic blocks, mark the cold
905 traces as already connected, to remove them from consideration
906 for connection to the hot traces. After the hot traces have all
907 been connected (determined by "unconnected_hot_trace_count"), we
908 will go back and connect the cold traces. */
910 cold_traces = xcalloc (n_traces, sizeof (bool));
912 if (flag_reorder_blocks_and_partition)
913 for (i = 0; i < n_traces; i++)
915 if (BB_PARTITION (traces[i].first) == BB_COLD_PARTITION)
917 connected[i] = true;
918 cold_traces[i] = true;
919 cold_connected = false;
921 else
922 unconnected_hot_trace_count++;
925 for (i = 0; i < n_traces || !cold_connected ; i++)
927 int t = i;
928 int t2;
929 edge e, best;
930 int best_len;
932 /* If we are partitioning hot/cold basic blocks, check to see
933 if all the hot traces have been connected. If so, go back
934 and mark the cold traces as unconnected so we can connect
935 them up too. Re-set "i" to the first (unconnected) cold
936 trace. Use flag "cold_connected" to make sure we don't do
937 this step more than once. */
939 if (flag_reorder_blocks_and_partition
940 && (i >= n_traces || unconnected_hot_trace_count <= 0)
941 && !cold_connected)
943 int j;
944 int first_cold_trace = -1;
946 for (j = 0; j < n_traces; j++)
947 if (cold_traces[j])
949 connected[j] = false;
950 if (first_cold_trace == -1)
951 first_cold_trace = j;
953 i = t = first_cold_trace;
954 cold_connected = true;
957 if (connected[t])
958 continue;
960 connected[t] = true;
961 if (unconnected_hot_trace_count > 0)
962 unconnected_hot_trace_count--;
964 /* Find the predecessor traces. */
965 for (t2 = t; t2 > 0;)
967 edge_iterator ei;
968 best = NULL;
969 best_len = 0;
970 FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
972 int si = e->src->index;
974 if (e->src != ENTRY_BLOCK_PTR
975 && (e->flags & EDGE_CAN_FALLTHRU)
976 && !(e->flags & EDGE_COMPLEX)
977 && bbd[si].end_of_trace >= 0
978 && !connected[bbd[si].end_of_trace]
979 && (!best
980 || e->probability > best->probability
981 || (e->probability == best->probability
982 && traces[bbd[si].end_of_trace].length > best_len)))
984 best = e;
985 best_len = traces[bbd[si].end_of_trace].length;
988 if (best)
990 best->src->rbi->next = best->dest;
991 t2 = bbd[best->src->index].end_of_trace;
992 connected[t2] = true;
994 if (unconnected_hot_trace_count > 0)
995 unconnected_hot_trace_count--;
997 if (dump_file)
999 fprintf (dump_file, "Connection: %d %d\n",
1000 best->src->index, best->dest->index);
1003 else
1004 break;
1007 if (last_trace >= 0)
1008 traces[last_trace].last->rbi->next = traces[t2].first;
1009 last_trace = t;
1011 /* Find the successor traces. */
1012 while (1)
1014 /* Find the continuation of the chain. */
1015 edge_iterator ei;
1016 best = NULL;
1017 best_len = 0;
1018 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1020 int di = e->dest->index;
1022 if (e->dest != EXIT_BLOCK_PTR
1023 && (e->flags & EDGE_CAN_FALLTHRU)
1024 && !(e->flags & EDGE_COMPLEX)
1025 && bbd[di].start_of_trace >= 0
1026 && !connected[bbd[di].start_of_trace]
1027 && (!best
1028 || e->probability > best->probability
1029 || (e->probability == best->probability
1030 && traces[bbd[di].start_of_trace].length > best_len)))
1032 best = e;
1033 best_len = traces[bbd[di].start_of_trace].length;
1037 if (best)
1039 if (dump_file)
1041 fprintf (dump_file, "Connection: %d %d\n",
1042 best->src->index, best->dest->index);
1044 t = bbd[best->dest->index].start_of_trace;
1045 traces[last_trace].last->rbi->next = traces[t].first;
1046 connected[t] = true;
1047 if (unconnected_hot_trace_count > 0)
1048 unconnected_hot_trace_count--;
1049 last_trace = t;
1051 else
1053 /* Try to connect the traces by duplication of 1 block. */
1054 edge e2;
1055 basic_block next_bb = NULL;
1056 bool try_copy = false;
1058 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1059 if (e->dest != EXIT_BLOCK_PTR
1060 && (e->flags & EDGE_CAN_FALLTHRU)
1061 && !(e->flags & EDGE_COMPLEX)
1062 && (!best || e->probability > best->probability))
1064 edge_iterator ei;
1065 edge best2 = NULL;
1066 int best2_len = 0;
1068 /* If the destination is a start of a trace which is only
1069 one block long, then no need to search the successor
1070 blocks of the trace. Accept it. */
1071 if (bbd[e->dest->index].start_of_trace >= 0
1072 && traces[bbd[e->dest->index].start_of_trace].length
1073 == 1)
1075 best = e;
1076 try_copy = true;
1077 continue;
1080 FOR_EACH_EDGE (e2, ei, e->dest->succs)
1082 int di = e2->dest->index;
1084 if (e2->dest == EXIT_BLOCK_PTR
1085 || ((e2->flags & EDGE_CAN_FALLTHRU)
1086 && !(e2->flags & EDGE_COMPLEX)
1087 && bbd[di].start_of_trace >= 0
1088 && !connected[bbd[di].start_of_trace]
1089 && (EDGE_FREQUENCY (e2) >= freq_threshold)
1090 && (e2->count >= count_threshold)
1091 && (!best2
1092 || e2->probability > best2->probability
1093 || (e2->probability == best2->probability
1094 && traces[bbd[di].start_of_trace].length
1095 > best2_len))))
1097 best = e;
1098 best2 = e2;
1099 if (e2->dest != EXIT_BLOCK_PTR)
1100 best2_len = traces[bbd[di].start_of_trace].length;
1101 else
1102 best2_len = INT_MAX;
1103 next_bb = e2->dest;
1104 try_copy = true;
1109 if (flag_reorder_blocks_and_partition)
1110 try_copy = false;
1112 /* Copy tiny blocks always; copy larger blocks only when the
1113 edge is traversed frequently enough. */
1114 if (try_copy
1115 && copy_bb_p (best->dest,
1116 !optimize_size
1117 && EDGE_FREQUENCY (best) >= freq_threshold
1118 && best->count >= count_threshold))
1120 basic_block new_bb;
1122 if (dump_file)
1124 fprintf (dump_file, "Connection: %d %d ",
1125 traces[t].last->index, best->dest->index);
1126 if (!next_bb)
1127 fputc ('\n', dump_file);
1128 else if (next_bb == EXIT_BLOCK_PTR)
1129 fprintf (dump_file, "exit\n");
1130 else
1131 fprintf (dump_file, "%d\n", next_bb->index);
1134 new_bb = copy_bb (best->dest, best, traces[t].last, t);
1135 traces[t].last = new_bb;
1136 if (next_bb && next_bb != EXIT_BLOCK_PTR)
1138 t = bbd[next_bb->index].start_of_trace;
1139 traces[last_trace].last->rbi->next = traces[t].first;
1140 connected[t] = true;
1141 if (unconnected_hot_trace_count > 0)
1142 unconnected_hot_trace_count--;
1143 last_trace = t;
1145 else
1146 break; /* Stop finding the successor traces. */
1148 else
1149 break; /* Stop finding the successor traces. */
1154 if (dump_file)
1156 basic_block bb;
1158 fprintf (dump_file, "Final order:\n");
1159 for (bb = traces[0].first; bb; bb = bb->rbi->next)
1160 fprintf (dump_file, "%d ", bb->index);
1161 fprintf (dump_file, "\n");
1162 fflush (dump_file);
1165 FREE (connected);
1166 FREE (cold_traces);
1169 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
1170 when code size is allowed to grow by duplication. */
1172 static bool
1173 copy_bb_p (basic_block bb, int code_may_grow)
1175 int size = 0;
1176 int max_size = uncond_jump_length;
1177 rtx insn;
1179 if (!bb->frequency)
1180 return false;
1181 if (EDGE_COUNT (bb->preds) < 2)
1182 return false;
1183 if (!can_duplicate_block_p (bb))
1184 return false;
1186 /* Avoid duplicating blocks which have many successors (PR/13430). */
1187 if (EDGE_COUNT (bb->succs) > 8)
1188 return false;
1190 if (code_may_grow && maybe_hot_bb_p (bb))
1191 max_size *= 8;
1193 FOR_BB_INSNS (bb, insn)
1195 if (INSN_P (insn))
1196 size += get_attr_length (insn);
1199 if (size <= max_size)
1200 return true;
1202 if (dump_file)
1204 fprintf (dump_file,
1205 "Block %d can't be copied because its size = %d.\n",
1206 bb->index, size);
1209 return false;
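/* A sizing sketch (the concrete jump length is target-dependent and only an
   assumption here): if get_uncond_jump_length () reports 4, a cold block or
   a block copied with CODE_MAY_GROW clear must fit in 4 length units to be
   duplicated, while a hot block with CODE_MAY_GROW set may be up to
   8 * 4 == 32 units long.  */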
1212 /* Return the length of unconditional jump instruction. */
1214 static int
1215 get_uncond_jump_length (void)
1217 rtx label, jump;
1218 int length;
1220 label = emit_label_before (gen_label_rtx (), get_insns ());
1221 jump = emit_jump_insn (gen_jump (label));
1223 length = get_attr_length (jump);
1225 delete_insn (jump);
1226 delete_insn (label);
1227 return length;
1230 static void
1231 add_unlikely_executed_notes (void)
1233 basic_block bb;
1235 /* Add the UNLIKELY_EXECUTED_NOTES to each cold basic block. */
1237 FOR_EACH_BB (bb)
1238 if (BB_PARTITION (bb) == BB_COLD_PARTITION)
1239 mark_bb_for_unlikely_executed_section (bb);
1242 /* Find the basic blocks that are rarely executed and need to be moved to
1243 a separate section of the .o file (to cut down on paging and improve
1244 cache locality). */
1246 static void
1247 find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges,
1248 int *n_crossing_edges,
1249 int *max_idx)
1251 basic_block bb;
1252 bool has_hot_blocks = false;
1253 edge e;
1254 int i;
1255 edge_iterator ei;
1257 /* Mark which partition (hot/cold) each basic block belongs in. */
1259 FOR_EACH_BB (bb)
1261 if (probably_never_executed_bb_p (bb))
1262 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1263 else
1265 BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1266 has_hot_blocks = true;
1270 /* Since all "hot" basic blocks will eventually be scheduled before all
1271 cold basic blocks, make *sure* the real function entry block is in
1272 the hot partition (if there is one). */
1274 if (has_hot_blocks)
1275 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
1276 if (e->dest->index >= 0)
1278 BB_SET_PARTITION (e->dest, BB_HOT_PARTITION);
1279 break;
1282 /* Mark every edge that crosses between sections. */
1284 i = 0;
1285 if (targetm.have_named_sections)
1287 FOR_EACH_BB (bb)
1288 FOR_EACH_EDGE (e, ei, bb->succs)
1290 if (e->src != ENTRY_BLOCK_PTR
1291 && e->dest != EXIT_BLOCK_PTR
1292 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1294 e->flags |= EDGE_CROSSING;
1295 if (i == *max_idx)
1297 *max_idx *= 2;
1298 crossing_edges = xrealloc (crossing_edges,
1299 (*max_idx) * sizeof (edge));
1301 crossing_edges[i++] = e;
1303 else
1304 e->flags &= ~EDGE_CROSSING;
1307 *n_crossing_edges = i;
1310 /* Add NOTE_INSN_UNLIKELY_EXECUTED_CODE to top of basic block. This note
1311 is later used to mark the basic block to be put in the
1312 unlikely-to-be-executed section of the .o file. */
1314 static void
1315 mark_bb_for_unlikely_executed_section (basic_block bb)
1317 rtx cur_insn;
1318 rtx insert_insn = NULL;
1319 rtx new_note;
1321 /* Insert new NOTE immediately after BASIC_BLOCK note. */
1323 for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb));
1324 cur_insn = NEXT_INSN (cur_insn))
1325 if (GET_CODE (cur_insn) == NOTE
1326 && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK)
1328 insert_insn = cur_insn;
1329 break;
1332 /* If basic block does not contain a NOTE_INSN_BASIC_BLOCK, there is
1333 a major problem. */
1334 gcc_assert (insert_insn);
1336 /* Insert note and assign basic block number to it. */
1338 new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
1339 insert_insn);
1340 NOTE_BASIC_BLOCK (new_note) = bb;
1343 /* If any destination of a crossing edge does not have a label, add a label;
1344 convert any fall-through crossing edges (for blocks that do not contain
1345 a jump) to unconditional jumps. */
1347 static void
1348 add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
1350 int i;
1351 basic_block src;
1352 basic_block dest;
1353 rtx label;
1354 rtx barrier;
1355 rtx new_jump;
1357 for (i=0; i < n_crossing_edges; i++)
1359 if (crossing_edges[i])
1361 src = crossing_edges[i]->src;
1362 dest = crossing_edges[i]->dest;
1364 /* Make sure dest has a label. */
1366 if (dest && (dest != EXIT_BLOCK_PTR))
1368 label = block_label (dest);
1370 /* Make sure source block ends with a jump. */
1372 if (src && (src != ENTRY_BLOCK_PTR))
1374 if (!JUMP_P (BB_END (src)))
1375 /* bb just falls through. */
1377 /* make sure there's only one successor */
1378 gcc_assert (EDGE_COUNT (src->succs) == 1);
1380 /* Find label in dest block. */
1381 label = block_label (dest);
1383 new_jump = emit_jump_insn_after (gen_jump (label),
1384 BB_END (src));
1385 barrier = emit_barrier_after (new_jump);
1386 JUMP_LABEL (new_jump) = label;
1387 LABEL_NUSES (label) += 1;
1388 src->rbi->footer = unlink_insn_chain (barrier, barrier);
1389 /* Mark edge as non-fallthru. */
1390 crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
1391 } /* end: 'if (GET_CODE ... ' */
1392 } /* end: 'if (src && src->index...' */
1393 } /* end: 'if (dest && dest->index...' */
1394 } /* end: 'if (crossing_edges[i]...' */
1395 } /* end for loop */
1398 /* Find any bb's where the fall-through edge is a crossing edge (note that
1399 these bb's must also contain a conditional jump; we've already
1400 dealt with fall-through edges for blocks that didn't have a
1401 conditional jump in the call to add_labels_and_missing_jumps).
1402 Convert the fall-through edge to non-crossing edge by inserting a
1403 new bb to fall-through into. The new bb will contain an
1404 unconditional jump (crossing edge) to the original fall through
1405 destination. */
1407 static void
1408 fix_up_fall_thru_edges (void)
1410 basic_block cur_bb;
1411 basic_block new_bb;
1412 edge succ1;
1413 edge succ2;
1414 edge fall_thru;
1415 edge cond_jump = NULL;
1416 edge e;
1417 bool cond_jump_crosses;
1418 int invert_worked;
1419 rtx old_jump;
1420 rtx fall_thru_label;
1421 rtx barrier;
1423 FOR_EACH_BB (cur_bb)
1425 fall_thru = NULL;
1426 if (EDGE_COUNT (cur_bb->succs) > 0)
1427 succ1 = EDGE_SUCC (cur_bb, 0);
1428 else
1429 succ1 = NULL;
1431 if (EDGE_COUNT (cur_bb->succs) > 1)
1432 succ2 = EDGE_SUCC (cur_bb, 1);
1433 else
1434 succ2 = NULL;
1436 /* Find the fall-through edge. */
1438 if (succ1
1439 && (succ1->flags & EDGE_FALLTHRU))
1441 fall_thru = succ1;
1442 cond_jump = succ2;
1444 else if (succ2
1445 && (succ2->flags & EDGE_FALLTHRU))
1447 fall_thru = succ2;
1448 cond_jump = succ1;
1451 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
1453 /* Check to see if the fall-thru edge is a crossing edge. */
1455 if (fall_thru->flags & EDGE_CROSSING)
1457 /* The fall_thru edge crosses; now check the cond jump edge, if
1458 it exists. */
1460 cond_jump_crosses = true;
1461 invert_worked = 0;
1462 old_jump = BB_END (cur_bb);
1464 /* Find the jump instruction, if there is one. */
1466 if (cond_jump)
1468 if (!(cond_jump->flags & EDGE_CROSSING))
1469 cond_jump_crosses = false;
1471 /* We know the fall-thru edge crosses; if the cond
1472 jump edge does NOT cross, and its destination is the
1473 next block in the bb order, invert the jump
1474 (i.e. fix it so the fall thru does not cross and
1475 the cond jump does). */
1477 if (!cond_jump_crosses
1478 && cur_bb->rbi->next == cond_jump->dest)
1480 /* Find label in fall_thru block. We've already added
1481 any missing labels, so there must be one. */
1483 fall_thru_label = block_label (fall_thru->dest);
1485 if (old_jump && fall_thru_label)
1486 invert_worked = invert_jump (old_jump,
1487 fall_thru_label,0);
1488 if (invert_worked)
1490 fall_thru->flags &= ~EDGE_FALLTHRU;
1491 cond_jump->flags |= EDGE_FALLTHRU;
1492 update_br_prob_note (cur_bb);
1493 e = fall_thru;
1494 fall_thru = cond_jump;
1495 cond_jump = e;
1496 cond_jump->flags |= EDGE_CROSSING;
1497 fall_thru->flags &= ~EDGE_CROSSING;
1502 if (cond_jump_crosses || !invert_worked)
1504 /* This is the case where both edges out of the basic
1505 block are crossing edges. Here we will fix up the
1506 fall through edge. The jump edge will be taken care
1507 of later. */
1509 new_bb = force_nonfallthru (fall_thru);
1511 if (new_bb)
1513 new_bb->rbi->next = cur_bb->rbi->next;
1514 cur_bb->rbi->next = new_bb;
1516 /* Make sure new fall-through bb is in same
1517 partition as bb it's falling through from. */
1519 BB_COPY_PARTITION (new_bb, cur_bb);
1520 EDGE_SUCC (new_bb, 0)->flags |= EDGE_CROSSING;
1523 /* Add barrier after new jump */
1525 if (new_bb)
1527 barrier = emit_barrier_after (BB_END (new_bb));
1528 new_bb->rbi->footer = unlink_insn_chain (barrier,
1529 barrier);
1531 else
1533 barrier = emit_barrier_after (BB_END (cur_bb));
1534 cur_bb->rbi->footer = unlink_insn_chain (barrier,
1535 barrier);
1543 /* This function checks the destination block of a "crossing jump" to
1544 see if it has any crossing predecessors that begin with a code label
1545 and end with an unconditional jump. If so, it returns that predecessor
1546 block. (This is to avoid creating lots of new basic blocks that all
1547 contain unconditional jumps to the same destination). */
1549 static basic_block
1550 find_jump_block (basic_block jump_dest)
1552 basic_block source_bb = NULL;
1553 edge e;
1554 rtx insn;
1555 edge_iterator ei;
1557 FOR_EACH_EDGE (e, ei, jump_dest->preds)
1558 if (e->flags & EDGE_CROSSING)
1560 basic_block src = e->src;
1562 /* Check each predecessor to see if it has a label, and contains
1563 only one executable instruction, which is an unconditional jump.
1564 If so, we can use it. */
1566 if (LABEL_P (BB_HEAD (src)))
1567 for (insn = BB_HEAD (src);
1568 !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
1569 insn = NEXT_INSN (insn))
1571 if (INSN_P (insn)
1572 && insn == BB_END (src)
1573 && JUMP_P (insn)
1574 && !any_condjump_p (insn))
1576 source_bb = src;
1577 break;
1581 if (source_bb)
1582 break;
1585 return source_bb;
1588 /* Find all BBs with conditional jumps that are crossing edges;
1589 insert a new bb and make the conditional jump branch to the new
1590 bb instead (make the new bb the same color so the conditional branch won't
1591 be a 'crossing' edge). Insert an unconditional jump from the
1592 new bb to the original destination of the conditional jump. */
1594 static void
1595 fix_crossing_conditional_branches (void)
1597 basic_block cur_bb;
1598 basic_block new_bb;
1599 basic_block last_bb;
1600 basic_block dest;
1601 basic_block prev_bb;
1602 edge succ1;
1603 edge succ2;
1604 edge crossing_edge;
1605 edge new_edge;
1606 rtx old_jump;
1607 rtx set_src;
1608 rtx old_label = NULL_RTX;
1609 rtx new_label;
1610 rtx new_jump;
1611 rtx barrier;
1613 last_bb = EXIT_BLOCK_PTR->prev_bb;
1615 FOR_EACH_BB (cur_bb)
1617 crossing_edge = NULL;
1618 if (EDGE_COUNT (cur_bb->succs) > 0)
1619 succ1 = EDGE_SUCC (cur_bb, 0);
1620 else
1621 succ1 = NULL;
1623 if (EDGE_COUNT (cur_bb->succs) > 1)
1624 succ2 = EDGE_SUCC (cur_bb, 1);
1625 else
1626 succ2 = NULL;
1628 /* We already took care of fall-through edges, so only one successor
1629 can be a crossing edge. */
1631 if (succ1 && (succ1->flags & EDGE_CROSSING))
1632 crossing_edge = succ1;
1633 else if (succ2 && (succ2->flags & EDGE_CROSSING))
1634 crossing_edge = succ2;
1636 if (crossing_edge)
1638 old_jump = BB_END (cur_bb);
1640 /* Check to make sure the jump instruction is a
1641 conditional jump. */
1643 set_src = NULL_RTX;
1645 if (any_condjump_p (old_jump))
1647 if (GET_CODE (PATTERN (old_jump)) == SET)
1648 set_src = SET_SRC (PATTERN (old_jump));
1649 else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
1651 set_src = XVECEXP (PATTERN (old_jump), 0,0);
1652 if (GET_CODE (set_src) == SET)
1653 set_src = SET_SRC (set_src);
1654 else
1655 set_src = NULL_RTX;
1659 if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
1661 if (GET_CODE (XEXP (set_src, 1)) == PC)
1662 old_label = XEXP (set_src, 2);
1663 else if (GET_CODE (XEXP (set_src, 2)) == PC)
1664 old_label = XEXP (set_src, 1);
1666 /* Check to see if new bb for jumping to that dest has
1667 already been created; if so, use it; if not, create
1668 a new one. */
1670 new_bb = find_jump_block (crossing_edge->dest);
1672 if (new_bb)
1673 new_label = block_label (new_bb);
1674 else
1676 /* Create new basic block to be dest for
1677 conditional jump. */
1679 new_bb = create_basic_block (NULL, NULL, last_bb);
1680 new_bb->rbi->next = last_bb->rbi->next;
1681 last_bb->rbi->next = new_bb;
1682 prev_bb = last_bb;
1683 last_bb = new_bb;
1685 /* Update register liveness information. */
1687 new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
1688 new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
1689 COPY_REG_SET (new_bb->global_live_at_end,
1690 prev_bb->global_live_at_end);
1691 COPY_REG_SET (new_bb->global_live_at_start,
1692 prev_bb->global_live_at_end);
1694 /* Put appropriate instructions in new bb. */
1696 new_label = gen_label_rtx ();
1697 emit_label_before (new_label, BB_HEAD (new_bb));
1698 BB_HEAD (new_bb) = new_label;
1700 if (GET_CODE (old_label) == LABEL_REF)
1702 old_label = JUMP_LABEL (old_jump);
1703 new_jump = emit_jump_insn_after (gen_jump
1704 (old_label),
1705 BB_END (new_bb));
1707 else
1709 gcc_assert (HAVE_return
1710 && GET_CODE (old_label) == RETURN);
1711 new_jump = emit_jump_insn_after (gen_return (),
1712 BB_END (new_bb));
1715 barrier = emit_barrier_after (new_jump);
1716 JUMP_LABEL (new_jump) = old_label;
1717 new_bb->rbi->footer = unlink_insn_chain (barrier,
1718 barrier);
1720 /* Make sure new bb is in same partition as source
1721 of conditional branch. */
1722 BB_COPY_PARTITION (new_bb, cur_bb);
1725 /* Make old jump branch to new bb. */
1727 redirect_jump (old_jump, new_label, 0);
1729 /* Remove crossing_edge as predecessor of 'dest'. */
1731 dest = crossing_edge->dest;
1733 redirect_edge_succ (crossing_edge, new_bb);
1735 /* Make a new edge from new_bb to old dest; new edge
1736 will be a successor for new_bb and a predecessor
1737 for 'dest'. */
1739 if (EDGE_COUNT (new_bb->succs) == 0)
1740 new_edge = make_edge (new_bb, dest, 0);
1741 else
1742 new_edge = EDGE_SUCC (new_bb, 0);
1744 crossing_edge->flags &= ~EDGE_CROSSING;
1745 new_edge->flags |= EDGE_CROSSING;
1751 /* Find any unconditional branches that cross between hot and cold
1752 sections. Convert them into indirect jumps instead. */
1754 static void
1755 fix_crossing_unconditional_branches (void)
1757 basic_block cur_bb;
1758 rtx last_insn;
1759 rtx label;
1760 rtx label_addr;
1761 rtx indirect_jump_sequence;
1762 rtx jump_insn = NULL_RTX;
1763 rtx new_reg;
1764 rtx cur_insn;
1765 edge succ;
1767 FOR_EACH_BB (cur_bb)
1769 last_insn = BB_END (cur_bb);
1770 succ = EDGE_SUCC (cur_bb, 0);
1772 /* Check to see if bb ends in a crossing (unconditional) jump. At
1773 this point, no crossing jumps should be conditional. */
1775 if (JUMP_P (last_insn)
1776 && (succ->flags & EDGE_CROSSING))
1778 rtx label2, table;
1780 gcc_assert (!any_condjump_p (last_insn));
1782 /* Make sure the jump is not already an indirect or table jump. */
1784 if (!computed_jump_p (last_insn)
1785 && !tablejump_p (last_insn, &label2, &table))
1787 /* We have found a "crossing" unconditional branch. Now
1788 we must convert it to an indirect jump. First create a
1789 reference to the label, as the target for the jump. */
1791 label = JUMP_LABEL (last_insn);
1792 label_addr = gen_rtx_LABEL_REF (Pmode, label);
1793 LABEL_NUSES (label) += 1;
1795 /* Get a register to use for the indirect jump. */
1797 new_reg = gen_reg_rtx (Pmode);
1799 /* Generate the indirect jump sequence. */
1801 start_sequence ();
1802 emit_move_insn (new_reg, label_addr);
1803 emit_indirect_jump (new_reg);
1804 indirect_jump_sequence = get_insns ();
1805 end_sequence ();
1807 /* Make sure every instruction in the new jump sequence has
1808 its basic block set to be cur_bb. */
1810 for (cur_insn = indirect_jump_sequence; cur_insn;
1811 cur_insn = NEXT_INSN (cur_insn))
1813 BLOCK_FOR_INSN (cur_insn) = cur_bb;
1814 if (JUMP_P (cur_insn))
1815 jump_insn = cur_insn;
1818 /* Insert the new (indirect) jump sequence immediately before
1819 the unconditional jump, then delete the unconditional jump. */
1821 emit_insn_before (indirect_jump_sequence, last_insn);
1822 delete_insn (last_insn);
1824 /* Make BB_END for cur_bb be the jump instruction (NOT the
1825 barrier instruction at the end of the sequence...). */
1827 BB_END (cur_bb) = jump_insn;
1833 /* Add REG_CROSSING_JUMP note to all crossing jump insns. */
1835 static void
1836 add_reg_crossing_jump_notes (void)
1838 basic_block bb;
1839 edge e;
1840 edge_iterator ei;
1842 FOR_EACH_BB (bb)
1843 FOR_EACH_EDGE (e, ei, bb->succs)
1844 if ((e->flags & EDGE_CROSSING)
1845 && JUMP_P (BB_END (e->src)))
1846 REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
1847 NULL_RTX,
1848 REG_NOTES (BB_END
1849 (e->src)));
1852 /* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be
1853 put in a separate section of the .o file, to reduce paging and
1854 improve cache performance (hopefully). This can result in bits of
1855 code from the same function being widely separated in the .o file.
1856 However, this is not apparent from the current bb structure. Therefore
1857 we must take care to ensure that: 1). There are no fall_thru edges
1858 that cross between sections; 2). For those architectures which
1859 have "short" conditional branches, all conditional branches that
1860 attempt to cross between sections are converted to unconditional
1861 branches; and, 3). For those architectures which have "short"
1862 unconditional branches, all unconditional branches that attempt
1863 to cross between sections are converted to indirect jumps.
1865 The code for fixing up fall_thru edges that cross between hot and
1866 cold basic blocks does so by creating new basic blocks containing
1867 unconditional branches to the appropriate label in the "other"
1868 section. The new basic block is then put in the same (hot or cold)
1869 section as the original conditional branch, and the fall_thru edge
1870 is modified to fall into the new basic block instead. By adding
1871 this level of indirection we end up with only unconditional branches
1872 crossing between hot and cold sections.
1874 Conditional branches are dealt with by adding a level of indirection.
1875 A new basic block is added in the same (hot/cold) section as the
1876 conditional branch, and the conditional branch is retargeted to the
1877 new basic block. The new basic block contains an unconditional branch
1878 to the original target of the conditional branch (in the other section).
1880 Unconditional branches are dealt with by converting them into
1881 indirect jumps. */
1883 static void
1884 fix_edges_for_rarely_executed_code (edge *crossing_edges,
1885 int n_crossing_edges)
1887 /* Make sure the source of any crossing edge ends in a jump and the
1888 destination of any crossing edge has a label. */
1890 add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
1892 /* Convert all crossing fall_thru edges to non-crossing fall
1893 thrus to unconditional jumps (that jump to the original fall
1894 thru dest). */
1896 fix_up_fall_thru_edges ();
1898 /* Only do the parts necessary for writing separate sections if
1899 the target architecture has the ability to write separate sections
1900 (i.e. it has named sections). Otherwise, the hot/cold partitioning
1901 information will be used when reordering blocks to try to put all
1902 the hot blocks together, then all the cold blocks, but no actual
1903 section partitioning will be done. */
1905 if (targetm.have_named_sections)
1907 /* If the architecture does not have conditional branches that can
1908 span all of memory, convert crossing conditional branches into
1909 crossing unconditional branches. */
1911 if (!HAS_LONG_COND_BRANCH)
1912 fix_crossing_conditional_branches ();
1914 /* If the architecture does not have unconditional branches that
1915 can span all of memory, convert crossing unconditional branches
1916 into indirect jumps. Since adding an indirect jump also adds
1917 a new register usage, update the register usage information as
1918 well. */
1920 if (!HAS_LONG_UNCOND_BRANCH)
1922 fix_crossing_unconditional_branches ();
1923 reg_scan (get_insns(), max_reg_num ());
1926 add_reg_crossing_jump_notes ();
1930 /* Reorder basic blocks. The main entry point to this file. FLAGS is
1931 the set of flags to pass to cfg_layout_initialize(). */
1933 void
1934 reorder_basic_blocks (unsigned int flags)
1936 int n_traces;
1937 int i;
1938 struct trace *traces;
1940 if (n_basic_blocks <= 1)
1941 return;
1943 if (targetm.cannot_modify_jumps_p ())
1944 return;
1946 timevar_push (TV_REORDER_BLOCKS);
1948 cfg_layout_initialize (flags);
1950 set_edge_can_fallthru_flag ();
1951 mark_dfs_back_edges ();
1953 /* We are estimating the length of uncond jump insn only once since the code
1954 for getting the insn length always returns the minimal length now. */
1955 if (uncond_jump_length == 0)
1956 uncond_jump_length = get_uncond_jump_length ();
1958 /* We need to know some information for each basic block. */
1959 array_size = GET_ARRAY_SIZE (last_basic_block);
1960 bbd = xmalloc (array_size * sizeof (bbro_basic_block_data));
1961 for (i = 0; i < array_size; i++)
1963 bbd[i].start_of_trace = -1;
1964 bbd[i].end_of_trace = -1;
1965 bbd[i].heap = NULL;
1966 bbd[i].node = NULL;
1969 traces = xmalloc (n_basic_blocks * sizeof (struct trace));
1970 n_traces = 0;
1971 find_traces (&n_traces, traces);
1972 connect_traces (n_traces, traces);
1973 FREE (traces);
1974 FREE (bbd);
1976 if (dump_file)
1977 dump_flow_info (dump_file);
1979 if (flag_reorder_blocks_and_partition
1980 && targetm.have_named_sections)
1981 add_unlikely_executed_notes ();
1983 cfg_layout_finalize ();
1985 timevar_pop (TV_REORDER_BLOCKS);
1988 /* Duplicate the blocks containing computed gotos. This basically unfactors
1989 computed gotos that were factored early on in the compilation process to
1990 speed up edge-based data flow. We used to not unfactor them again,
1991 which can seriously pessimize code with many computed jumps in the source
1992 code, such as interpreters. See e.g. PR15242. */
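/* A hedged illustration of the "factoring" being undone (the opcode names
   are made up): in an interpreter whose handlers each end in
   "goto *dispatch_table[*pc++];", early CFG factoring redirects every such
   computed goto through one shared dispatch block.  After this pass, small
   dispatch blocks are copied back into each handler, restoring one indirect
   branch per opcode, which typically predicts far better.  */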
1994 void
1995 duplicate_computed_gotos (void)
1997 basic_block bb, new_bb;
1998 bitmap candidates;
1999 int max_size;
2001 if (n_basic_blocks <= 1)
2002 return;
2004 if (targetm.cannot_modify_jumps_p ())
2005 return;
2007 timevar_push (TV_REORDER_BLOCKS);
2009 cfg_layout_initialize (0);
2011 /* We are estimating the length of uncond jump insn only once
2012 since the code for getting the insn length always returns
2013 the minimal length now. */
2014 if (uncond_jump_length == 0)
2015 uncond_jump_length = get_uncond_jump_length ();
2017 max_size = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
2018 candidates = BITMAP_XMALLOC ();
2020 /* Build the reorder chain for the original order of blocks.
2021 Look for a computed jump while we are at it. */
2022 FOR_EACH_BB (bb)
2024 if (bb->next_bb != EXIT_BLOCK_PTR)
2025 bb->rbi->next = bb->next_bb;
2027 /* If the block ends in a computed jump and it is small enough,
2028 make it a candidate for duplication. */
2029 if (computed_jump_p (BB_END (bb)))
2031 rtx insn;
2032 int size = 0;
2034 FOR_BB_INSNS (bb, insn)
2036 if (INSN_P (insn))
2037 size += get_attr_length (insn);
2038 if (size > max_size)
2039 break;
2042 if (size <= max_size)
2043 bitmap_set_bit (candidates, bb->index);
2047 /* Nothing to do if there is no computed jump here. */
2048 if (bitmap_empty_p (candidates))
2049 goto done;
2051 /* Duplicate computed gotos. */
2052 FOR_EACH_BB (bb)
2054 if (bb->rbi->visited)
2055 continue;
2057 bb->rbi->visited = 1;
2059 /* BB must have one outgoing edge. That edge must not lead to
2060 the exit block or the next block.
2061 The destination must have more than one predecessor. */
2062 if (EDGE_COUNT(bb->succs) != 1
2063 || EDGE_SUCC(bb,0)->dest == EXIT_BLOCK_PTR
2064 || EDGE_SUCC(bb,0)->dest == bb->next_bb
2065 || EDGE_COUNT(EDGE_SUCC(bb,0)->dest->preds) <= 1)
2066 continue;
2068 /* The successor block has to be a duplication candidate. */
2069 if (!bitmap_bit_p (candidates, EDGE_SUCC(bb,0)->dest->index))
2070 continue;
2072 new_bb = duplicate_block (EDGE_SUCC(bb,0)->dest, EDGE_SUCC(bb,0));
2073 new_bb->rbi->next = bb->rbi->next;
2074 bb->rbi->next = new_bb;
2075 new_bb->rbi->visited = 1;
2078 done:
2079 cfg_layout_finalize ();
2081 BITMAP_XFREE (candidates);
2083 timevar_pop (TV_REORDER_BLOCKS);
2086 /* This function is the main entry point for the optimization that
2087 partitions hot and cold basic blocks into separate sections of the
2088 .o file (to improve performance and cache locality). Ideally it
2089 would be called after all optimizations that rearrange the CFG have
2090 been called. However part of this optimization may introduce new
2091 register usage, so it must be called before register allocation has
2092 occurred. This means that this optimization is actually called
2093 well before the optimization that reorders basic blocks (see
2094 function above).
2096 This optimization checks the feedback information to determine
2097 which basic blocks are hot/cold and causes reorder_basic_blocks to
2098 add NOTE_INSN_UNLIKELY_EXECUTED_CODE to non-hot basic blocks. The
2099 presence or absence of this note is later used for writing out
2100 sections in the .o file. Because hot and cold sections can be
2101 arbitrarily large (within the bounds of memory), far beyond the
2102 size of a single function, it is necessary to fix up all edges that
2103 cross section boundaries, to make sure the instructions used can
2104 actually span the required distance. The fixes are described
2105 below.
2107 Fall-through edges must be changed into jumps; it is not safe or
2108 legal to fall through across a section boundary. Whenever a
2109 fall-through edge crossing a section boundary is encountered, a new
2110 basic block is inserted (in the same section as the fall-through
2111 source), and the fall through edge is redirected to the new basic
2112 block. The new basic block contains an unconditional jump to the
2113 original fall-through target. (If the unconditional jump is
2114 insufficient to cross section boundaries, that is dealt with a
2115 little later, see below).
2117 In order to deal with architectures that have short conditional
2118 branches (which cannot span all of memory) we take any conditional
2119 jump that attempts to cross a section boundary and add a level of
2120 indirection: it becomes a conditional jump to a new basic block, in
2121 the same section. The new basic block contains an unconditional
2122 jump to the original target, in the other section.
2124 For those architectures whose unconditional branch is also
2125 incapable of reaching all of memory, those unconditional jumps are
2126 converted into indirect jumps, through a register.
2128 IMPORTANT NOTE: This optimization causes some messy interactions
2129 with the cfg cleanup optimizations; those optimizations want to
2130 merge blocks wherever possible, and to collapse indirect jump
2131 sequences (change "A jumps to B jumps to C" directly into "A jumps
2132 to C"). Those optimizations can undo the jump fixes that
2133 partitioning is required to make (see above), in order to ensure
2134 that jumps attempting to cross section boundaries are really able
2135 to cover whatever distance the jump requires (on many architectures
2136 conditional or unconditional jumps are not able to reach all of
2137 memory). Therefore tests have to be inserted into each such
2138 optimization to make sure that it does not undo stuff necessary to
2139 cross partition boundaries. This would be much less of a problem
2140 if we could perform this optimization later in the compilation, but
2141 unfortunately the fact that we may need to create indirect jumps
2142 (through registers) requires that this optimization be performed
2143 before register allocation. */
2145 void
2146 partition_hot_cold_basic_blocks (void)
2148 basic_block cur_bb;
2149 edge *crossing_edges;
2150 int n_crossing_edges;
2151 int max_edges = 2 * last_basic_block;
2153 if (n_basic_blocks <= 1)
2154 return;
2156 crossing_edges = xcalloc (max_edges, sizeof (edge));
2158 cfg_layout_initialize (0);
2160 FOR_EACH_BB (cur_bb)
2161 if (cur_bb->index >= 0
2162 && cur_bb->next_bb->index >= 0)
2163 cur_bb->rbi->next = cur_bb->next_bb;
2165 find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
2166 &n_crossing_edges,
2167 &max_edges);
2169 if (n_crossing_edges > 0)
2170 fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
2172 free (crossing_edges);
2174 cfg_layout_finalize();