1 /* Basic block reordering routines for the GNU compiler.
2 Copyright (C) 2000, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
21 /* This (greedy) algorithm constructs traces in several rounds.
22 The construction starts from "seeds". The seed for the first round
23 is the entry point of the function. When there is more than one seed,
24 the one with the lowest key in the heap is selected first
25 (see function bb_to_key). Then the algorithm repeatedly adds the most
26 probable successor to the end of a trace. Finally it connects the traces.
28 There are two parameters: Branch Threshold and Exec Threshold.
29 If the probability of the edge to a successor of the current basic block is
30 lower than Branch Threshold or the frequency of the successor is lower than
31 Exec Threshold, the successor will be the seed in one of the next rounds.
32 Each round has these parameters lower than the previous one.
33 The last round has to have these parameters set to zero
34 so that the remaining blocks are picked up.
36 The algorithm selects the most probable successor from all unvisited
37 successors and successors that have been added to this trace.
38 The other successors (that have not been "sent" to the next round) will be
39 other seeds for this round and the secondary traces will start from them.
40 If the successor has not been visited in this trace, it is added to the trace
41 (however, there is some heuristic for simple branches).
42 If the successor has been visited in this trace, a loop has been found.
43 If the loop has many iterations, the loop is rotated so that the
44 source block of the most probable edge going out of the loop
45 is the last block of the trace.
46 If the loop has few iterations and there is no edge from the last block of
47 the loop going out of the loop, the loop header is duplicated.
48 Finally, the construction of the trace is terminated.
50 When connecting traces, it first checks whether there is an edge from the
51 last block of one trace to the first block of another trace.
52 When there are still some unconnected traces, it checks whether there exists
53 a basic block BB such that BB is a successor of the last bb of one trace
54 and BB is a predecessor of the first block of another trace. In this case,
55 BB is duplicated and the traces are connected through this duplicate.
56 The remaining traces are simply connected, so there will be a jump to the
57 beginning of the next trace.
60 References:
62 "Software Trace Cache"
63 A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
64 http://citeseer.nj.nec.com/15361.html
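/* The following is an illustrative, self-contained sketch of the greedy
   successor selection described above; it is not part of the pass itself.
   The flat arrays and the function name are hypothetical simplifications:
   the real code below works on edges, basic_block structures and a
   fibonacci heap, and refines near-ties with better_edge_p.  */

static int
sketch_pick_successor (const int *edge_prob, const int *edge_freq,
                       int n_succs, int branch_th, int exec_th)
{
  int best = -1;
  int best_prob = 0;
  int best_freq = 0;
  int i;

  for (i = 0; i < n_succs; i++)
    {
      /* Successors below the round's thresholds are not added to the trace;
         they become seeds of a later round.  */
      if (edge_prob[i] < branch_th || edge_freq[i] < exec_th)
        continue;

      /* Prefer the most probable edge; on a tie, prefer the lower-frequency
         edge (cf. better_edge_p, which uses a 10% tolerance band).  */
      if (best < 0
          || edge_prob[i] > best_prob
          || (edge_prob[i] == best_prob && edge_freq[i] < best_freq))
        {
          best = i;
          best_prob = edge_prob[i];
          best_freq = edge_freq[i];
        }
    }

  return best;
}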
68 #include "config.h"
69 #include "system.h"
70 #include "coretypes.h"
71 #include "tm.h"
72 #include "rtl.h"
73 #include "regs.h"
74 #include "flags.h"
75 #include "timevar.h"
76 #include "output.h"
77 #include "cfglayout.h"
78 #include "fibheap.h"
79 #include "target.h"
80 #include "function.h"
81 #include "tm_p.h"
82 #include "obstack.h"
83 #include "expr.h"
84 #include "params.h"
86 /* The number of rounds. In most cases there will only be 4 rounds, but
87 when partitioning hot and cold basic blocks into separate sections of
 88 the .o file, there will be an extra round. */
89 #define N_ROUNDS 5
91 /* Stubs in case we don't have a return insn.
 92 We have to check at runtime too, not only at compile time. */
94 #ifndef HAVE_return
95 #define HAVE_return 0
96 #define gen_return() NULL_RTX
97 #endif
100 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
101 static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
103 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
104 static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
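/* For example, with REG_BR_PROB_BASE == 10000 the round-0 branch threshold
   of 400 per mille corresponds to an edge probability of 4000, i.e. 40%;
   edges less probable than that send their destination to a later round.
   Similarly, the round-0 exec threshold of 500 per mille requires a block
   to have at least half of the maximal entry frequency.  */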
106 /* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
107 block the edge destination is not duplicated while connecting traces. */
108 #define DUPLICATION_THRESHOLD 100
110 /* Length of unconditional jump instruction. */
111 static int uncond_jump_length;
113 /* Structure to hold needed information for each basic block. */
114 typedef struct bbro_basic_block_data_def
 116 /* Which trace is the bb the start of (-1 means it is not the start of a trace). */
117 int start_of_trace;
 119 /* Which trace is the bb the end of (-1 means it is not the end of a trace). */
120 int end_of_trace;
122 /* Which heap is BB in (if any)? */
123 fibheap_t heap;
125 /* Which heap node is BB in (if any)? */
126 fibnode_t node;
127 } bbro_basic_block_data;
129 /* The current size of the following dynamic array. */
130 static int array_size;
132 /* The array which holds needed information for basic blocks. */
133 static bbro_basic_block_data *bbd;
 135 /* To avoid frequent reallocation the size of the array is greater than needed;
 136 the number of elements is (not less than) 1.25 * size_wanted. */
137 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
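/* For example, GET_ARRAY_SIZE (100) == ((100 / 4) + 1) * 5 == 130, i.e. at
   least the promised 1.25 * 100 = 125 elements.  */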
139 /* Free the memory and set the pointer to NULL. */
140 #define FREE(P) (gcc_assert (P), free (P), P = 0)
142 /* Structure for holding information about a trace. */
143 struct trace
145 /* First and last basic block of the trace. */
146 basic_block first, last;
 148 /* The round of STC creation in which this trace was found. */
149 int round;
151 /* The length (i.e. the number of basic blocks) of the trace. */
152 int length;
155 /* Maximum frequency and count of one of the entry blocks. */
156 static int max_entry_frequency;
157 static gcov_type max_entry_count;
159 /* Local function prototypes. */
160 static void find_traces (int *, struct trace *);
161 static basic_block rotate_loop (edge, struct trace *, int);
162 static void mark_bb_visited (basic_block, int);
163 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
164 int, fibheap_t *, int);
165 static basic_block copy_bb (basic_block, edge, basic_block, int);
166 static fibheapkey_t bb_to_key (basic_block);
167 static bool better_edge_p (basic_block, edge, int, int, int, int, edge);
168 static void connect_traces (int, struct trace *);
169 static bool copy_bb_p (basic_block, int);
170 static int get_uncond_jump_length (void);
171 static bool push_to_next_round_p (basic_block, int, int, int, gcov_type);
172 static void add_unlikely_executed_notes (void);
173 static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *,
174 int *,
175 int *);
176 static void mark_bb_for_unlikely_executed_section (basic_block);
177 static void add_labels_and_missing_jumps (edge *, int);
178 static void add_reg_crossing_jump_notes (void);
179 static void fix_up_fall_thru_edges (void);
180 static void fix_edges_for_rarely_executed_code (edge *, int);
181 static void fix_crossing_conditional_branches (void);
182 static void fix_crossing_unconditional_branches (void);
 184 /* Check to see if bb should be pushed into the next round of trace
 185 collection or not. Reasons for pushing the block forward are: 1)
 186 the block is cold, we are doing partitioning, and there will be
 187 another round (cold partition blocks are not supposed to be
 188 collected into traces until the very last round); or 2) there will
 189 be another round, and the basic block is not "hot enough" for the
 190 current round of trace collection. */
192 static bool
193 push_to_next_round_p (basic_block bb, int round, int number_of_rounds,
194 int exec_th, gcov_type count_th)
196 bool there_exists_another_round;
197 bool cold_block;
198 bool block_not_hot_enough;
199 bool next_round_is_last;
201 there_exists_another_round = round < number_of_rounds - 1;
202 next_round_is_last = round + 1 == number_of_rounds - 1;
204 cold_block = (flag_reorder_blocks_and_partition
205 && BB_PARTITION (bb) == BB_COLD_PARTITION);
207 block_not_hot_enough = (bb->frequency < exec_th
208 || bb->count < count_th
209 || probably_never_executed_bb_p (bb));
211 if (flag_reorder_blocks_and_partition
212 && next_round_is_last
213 && BB_PARTITION (bb) != BB_COLD_PARTITION)
214 return false;
215 else if (there_exists_another_round
216 && (cold_block || block_not_hot_enough))
217 return true;
218 else
219 return false;
222 /* Find the traces for Software Trace Cache. Chain each trace through
 223 RBI()->next. Store the number of traces in N_TRACES and the descriptions
 224 of the traces in TRACES. */
226 static void
227 find_traces (int *n_traces, struct trace *traces)
229 int i;
230 int number_of_rounds;
231 edge e;
232 edge_iterator ei;
233 fibheap_t heap;
235 /* Add one extra round of trace collection when partitioning hot/cold
236 basic blocks into separate sections. The last round is for all the
237 cold blocks (and ONLY the cold blocks). */
239 number_of_rounds = N_ROUNDS - 1;
240 if (flag_reorder_blocks_and_partition)
241 number_of_rounds = N_ROUNDS;
243 /* Insert entry points of function into heap. */
244 heap = fibheap_new ();
245 max_entry_frequency = 0;
246 max_entry_count = 0;
247 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
249 bbd[e->dest->index].heap = heap;
250 bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
251 e->dest);
252 if (e->dest->frequency > max_entry_frequency)
253 max_entry_frequency = e->dest->frequency;
254 if (e->dest->count > max_entry_count)
255 max_entry_count = e->dest->count;
258 /* Find the traces. */
259 for (i = 0; i < number_of_rounds; i++)
261 gcov_type count_threshold;
263 if (dump_file)
264 fprintf (dump_file, "STC - round %d\n", i + 1);
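      /* Compute the execution count threshold for this round; the order of
         the multiplication and the division is chosen so that the
         intermediate value cannot overflow when MAX_ENTRY_COUNT is very
         large (the divide-first form merely loses a little rounding
         precision).  */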
266 if (max_entry_count < INT_MAX / 1000)
267 count_threshold = max_entry_count * exec_threshold[i] / 1000;
268 else
269 count_threshold = max_entry_count / 1000 * exec_threshold[i];
271 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
272 max_entry_frequency * exec_threshold[i] / 1000,
273 count_threshold, traces, n_traces, i, &heap,
274 number_of_rounds);
276 fibheap_delete (heap);
278 if (dump_file)
280 for (i = 0; i < *n_traces; i++)
282 basic_block bb;
283 fprintf (dump_file, "Trace %d (round %d): ", i + 1,
284 traces[i].round + 1);
285 for (bb = traces[i].first; bb != traces[i].last; bb = bb->rbi->next)
286 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
287 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
289 fflush (dump_file);
 293 /* Rotate the loop whose back edge is BACK_EDGE, at the tail of trace TRACE
 294 (with sequence number TRACE_N). */
296 static basic_block
297 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
299 basic_block bb;
301 /* Information about the best end (end after rotation) of the loop. */
302 basic_block best_bb = NULL;
303 edge best_edge = NULL;
304 int best_freq = -1;
305 gcov_type best_count = -1;
306 /* The best edge is preferred when its destination is not visited yet
307 or is a start block of some trace. */
308 bool is_preferred = false;
 310 /* Find the most frequent edge that goes out of the current trace. */
311 bb = back_edge->dest;
314 edge e;
315 edge_iterator ei;
317 FOR_EACH_EDGE (e, ei, bb->succs)
318 if (e->dest != EXIT_BLOCK_PTR
319 && e->dest->rbi->visited != trace_n
320 && (e->flags & EDGE_CAN_FALLTHRU)
321 && !(e->flags & EDGE_COMPLEX))
323 if (is_preferred)
325 /* The best edge is preferred. */
326 if (!e->dest->rbi->visited
327 || bbd[e->dest->index].start_of_trace >= 0)
329 /* The current edge E is also preferred. */
330 int freq = EDGE_FREQUENCY (e);
331 if (freq > best_freq || e->count > best_count)
333 best_freq = freq;
334 best_count = e->count;
335 best_edge = e;
336 best_bb = bb;
340 else
342 if (!e->dest->rbi->visited
343 || bbd[e->dest->index].start_of_trace >= 0)
345 /* The current edge E is preferred. */
346 is_preferred = true;
347 best_freq = EDGE_FREQUENCY (e);
348 best_count = e->count;
349 best_edge = e;
350 best_bb = bb;
352 else
354 int freq = EDGE_FREQUENCY (e);
355 if (!best_edge || freq > best_freq || e->count > best_count)
357 best_freq = freq;
358 best_count = e->count;
359 best_edge = e;
360 best_bb = bb;
365 bb = bb->rbi->next;
367 while (bb != back_edge->dest);
369 if (best_bb)
371 /* Rotate the loop so that the BEST_EDGE goes out from the last block of
372 the trace. */
373 if (back_edge->dest == trace->first)
375 trace->first = best_bb->rbi->next;
377 else
379 basic_block prev_bb;
381 for (prev_bb = trace->first;
382 prev_bb->rbi->next != back_edge->dest;
383 prev_bb = prev_bb->rbi->next)
385 prev_bb->rbi->next = best_bb->rbi->next;
387 /* Try to get rid of uncond jump to cond jump. */
388 if (single_succ_p (prev_bb))
390 basic_block header = single_succ (prev_bb);
 392 /* Duplicate HEADER if it is a small block containing a cond jump
 393 at the end. */
394 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
395 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
396 NULL_RTX))
397 copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
401 else
 403 /* We have not found a suitable loop tail, so do no rotation. */
404 best_bb = back_edge->src;
406 best_bb->rbi->next = NULL;
407 return best_bb;
 410 /* This function marks BB as visited in trace number TRACE. */
412 static void
413 mark_bb_visited (basic_block bb, int trace)
415 bb->rbi->visited = trace;
416 if (bbd[bb->index].heap)
418 fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
419 bbd[bb->index].heap = NULL;
420 bbd[bb->index].node = NULL;
 424 /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH, i.e. do
 425 not include into traces basic blocks whose probability is lower than BRANCH_TH
 426 or whose frequency is lower than EXEC_TH (or whose count is lower than
 427 COUNT_TH). It stores the new traces into TRACES and modifies the number of
 428 traces *N_TRACES. Sets the round (which the trace belongs to) to ROUND. It
 429 expects that the starting basic blocks are in *HEAP and at the end it deletes
 430 *HEAP and stores the starting points for the next round into the new *HEAP. */
432 static void
433 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
434 struct trace *traces, int *n_traces, int round,
435 fibheap_t *heap, int number_of_rounds)
437 /* The following variable refers to the last round in which non-"cold"
438 blocks may be collected into a trace. */
440 int last_round = N_ROUNDS - 1;
442 /* Heap for discarded basic blocks which are possible starting points for
443 the next round. */
444 fibheap_t new_heap = fibheap_new ();
446 while (!fibheap_empty (*heap))
448 basic_block bb;
449 struct trace *trace;
450 edge best_edge, e;
451 fibheapkey_t key;
452 edge_iterator ei;
454 bb = fibheap_extract_min (*heap);
455 bbd[bb->index].heap = NULL;
456 bbd[bb->index].node = NULL;
458 if (dump_file)
459 fprintf (dump_file, "Getting bb %d\n", bb->index);
 461 /* If the BB's frequency is too low, send BB to the next round. When
462 partitioning hot/cold blocks into separate sections, make sure all
463 the cold blocks (and ONLY the cold blocks) go into the (extra) final
464 round. */
466 if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
467 count_th))
469 int key = bb_to_key (bb);
470 bbd[bb->index].heap = new_heap;
471 bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
473 if (dump_file)
474 fprintf (dump_file,
475 " Possible start point of next round: %d (key: %d)\n",
476 bb->index, key);
477 continue;
480 trace = traces + *n_traces;
481 trace->first = bb;
482 trace->round = round;
483 trace->length = 0;
484 (*n_traces)++;
488 int prob, freq;
489 bool ends_in_call;
491 /* The probability and frequency of the best edge. */
492 int best_prob = INT_MIN / 2;
493 int best_freq = INT_MIN / 2;
495 best_edge = NULL;
496 mark_bb_visited (bb, *n_traces);
497 trace->length++;
499 if (dump_file)
500 fprintf (dump_file, "Basic block %d was visited in trace %d\n",
501 bb->index, *n_traces - 1);
503 ends_in_call = block_ends_with_call_p (bb);
505 /* Select the successor that will be placed after BB. */
506 FOR_EACH_EDGE (e, ei, bb->succs)
508 gcc_assert (!(e->flags & EDGE_FAKE));
510 if (e->dest == EXIT_BLOCK_PTR)
511 continue;
513 if (e->dest->rbi->visited
514 && e->dest->rbi->visited != *n_traces)
515 continue;
517 if (BB_PARTITION (e->dest) == BB_COLD_PARTITION
518 && round < last_round)
519 continue;
521 prob = e->probability;
522 freq = EDGE_FREQUENCY (e);
524 /* The only sensible preference for a call instruction is the
525 fallthru edge. Don't bother selecting anything else. */
526 if (ends_in_call)
528 if (e->flags & EDGE_CAN_FALLTHRU)
530 best_edge = e;
531 best_prob = prob;
532 best_freq = freq;
534 continue;
 537 /* An edge that cannot be a fallthru edge, or an improbable or infrequent
 538 successor (i.e. an unsuitable successor). */
539 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
540 || prob < branch_th || freq < exec_th || e->count < count_th)
541 continue;
543 /* If partitioning hot/cold basic blocks, don't consider edges
544 that cross section boundaries. */
546 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
547 best_edge))
549 best_edge = e;
550 best_prob = prob;
551 best_freq = freq;
 555 /* If the best destination has multiple predecessors, and can be
 556 duplicated more cheaply than a jump, don't allow it to be added
557 to a trace. We'll duplicate it when connecting traces. */
558 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
559 && copy_bb_p (best_edge->dest, 0))
560 best_edge = NULL;
562 /* Add all non-selected successors to the heaps. */
563 FOR_EACH_EDGE (e, ei, bb->succs)
565 if (e == best_edge
566 || e->dest == EXIT_BLOCK_PTR
567 || e->dest->rbi->visited)
568 continue;
570 key = bb_to_key (e->dest);
572 if (bbd[e->dest->index].heap)
574 /* E->DEST is already in some heap. */
575 if (key != bbd[e->dest->index].node->key)
577 if (dump_file)
579 fprintf (dump_file,
580 "Changing key for bb %d from %ld to %ld.\n",
581 e->dest->index,
582 (long) bbd[e->dest->index].node->key,
583 key);
585 fibheap_replace_key (bbd[e->dest->index].heap,
586 bbd[e->dest->index].node, key);
589 else
591 fibheap_t which_heap = *heap;
593 prob = e->probability;
594 freq = EDGE_FREQUENCY (e);
596 if (!(e->flags & EDGE_CAN_FALLTHRU)
597 || (e->flags & EDGE_COMPLEX)
598 || prob < branch_th || freq < exec_th
599 || e->count < count_th)
601 /* When partitioning hot/cold basic blocks, make sure
602 the cold blocks (and only the cold blocks) all get
603 pushed to the last round of trace collection. */
605 if (push_to_next_round_p (e->dest, round,
606 number_of_rounds,
607 exec_th, count_th))
608 which_heap = new_heap;
611 bbd[e->dest->index].heap = which_heap;
612 bbd[e->dest->index].node = fibheap_insert (which_heap,
613 key, e->dest);
615 if (dump_file)
617 fprintf (dump_file,
618 " Possible start of %s round: %d (key: %ld)\n",
619 (which_heap == new_heap) ? "next" : "this",
620 e->dest->index, (long) key);
626 if (best_edge) /* Suitable successor was found. */
628 if (best_edge->dest->rbi->visited == *n_traces)
 630 /* We do nothing with one-basic-block loops. */
631 if (best_edge->dest != bb)
633 if (EDGE_FREQUENCY (best_edge)
634 > 4 * best_edge->dest->frequency / 5)
636 /* The loop has at least 4 iterations. If the loop
637 header is not the first block of the function
638 we can rotate the loop. */
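 /* (The 4/5 test above means the back edge accounts for at least 80%
    of the header's executions, so the header is entered from outside
    the loop at most once per five executions, i.e. the loop iterates
    roughly four or more times on average.)  */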
640 if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
642 if (dump_file)
644 fprintf (dump_file,
645 "Rotating loop %d - %d\n",
646 best_edge->dest->index, bb->index);
648 bb->rbi->next = best_edge->dest;
649 bb = rotate_loop (best_edge, trace, *n_traces);
652 else
654 /* The loop has less than 4 iterations. */
656 if (single_succ_p (bb)
657 && copy_bb_p (best_edge->dest, !optimize_size))
659 bb = copy_bb (best_edge->dest, best_edge, bb,
660 *n_traces);
665 /* Terminate the trace. */
666 break;
668 else
 670 /* Check for a situation where block A has two successors, B and C, the
 current best edge is the one from A to C, B's single predecessor is A,
 B's single successor is C,
 678 and where
679 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
680 >= EDGE_FREQUENCY (AC).
681 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
682 Best ordering is then A B C.
684 This situation is created for example by:
 686 if (A) B;
 687 C;
691 FOR_EACH_EDGE (e, ei, bb->succs)
692 if (e != best_edge
693 && (e->flags & EDGE_CAN_FALLTHRU)
694 && !(e->flags & EDGE_COMPLEX)
695 && !e->dest->rbi->visited
696 && single_pred_p (e->dest)
697 && !(e->flags & EDGE_CROSSING)
 698 && single_succ_p (e->dest)
699 && (single_succ_edge (e->dest)->flags
700 & EDGE_CAN_FALLTHRU)
701 && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
702 && single_succ (e->dest) == best_edge->dest
703 && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
705 best_edge = e;
706 if (dump_file)
707 fprintf (dump_file, "Selecting BB %d\n",
708 best_edge->dest->index);
709 break;
712 bb->rbi->next = best_edge->dest;
713 bb = best_edge->dest;
717 while (best_edge);
718 trace->last = bb;
719 bbd[trace->first->index].start_of_trace = *n_traces - 1;
720 bbd[trace->last->index].end_of_trace = *n_traces - 1;
 722 /* The trace is terminated so we have to recount the keys in the heap
 723 (some blocks can have a lower key because now one of their predecessors
 724 is an end of a trace). */
725 FOR_EACH_EDGE (e, ei, bb->succs)
727 if (e->dest == EXIT_BLOCK_PTR
728 || e->dest->rbi->visited)
729 continue;
731 if (bbd[e->dest->index].heap)
733 key = bb_to_key (e->dest);
734 if (key != bbd[e->dest->index].node->key)
736 if (dump_file)
738 fprintf (dump_file,
739 "Changing key for bb %d from %ld to %ld.\n",
740 e->dest->index,
741 (long) bbd[e->dest->index].node->key, key);
743 fibheap_replace_key (bbd[e->dest->index].heap,
744 bbd[e->dest->index].node,
745 key);
751 fibheap_delete (*heap);
753 /* "Return" the new heap. */
754 *heap = new_heap;
 757 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
 758 it to the trace after BB, mark OLD_BB visited and update the pass's data
 759 structures (TRACE is the number of the trace to which OLD_BB is duplicated). */
761 static basic_block
762 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
764 basic_block new_bb;
766 new_bb = duplicate_block (old_bb, e);
767 BB_COPY_PARTITION (new_bb, old_bb);
769 gcc_assert (e->dest == new_bb);
770 gcc_assert (!e->dest->rbi->visited);
772 if (dump_file)
773 fprintf (dump_file,
774 "Duplicated bb %d (created bb %d)\n",
775 old_bb->index, new_bb->index);
776 new_bb->rbi->visited = trace;
777 new_bb->rbi->next = bb->rbi->next;
778 bb->rbi->next = new_bb;
780 if (new_bb->index >= array_size || last_basic_block > array_size)
782 int i;
783 int new_size;
785 new_size = MAX (last_basic_block, new_bb->index + 1);
786 new_size = GET_ARRAY_SIZE (new_size);
787 bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data));
788 for (i = array_size; i < new_size; i++)
790 bbd[i].start_of_trace = -1;
791 bbd[i].end_of_trace = -1;
792 bbd[i].heap = NULL;
793 bbd[i].node = NULL;
795 array_size = new_size;
797 if (dump_file)
799 fprintf (dump_file,
800 "Growing the dynamic array to %d elements.\n",
801 array_size);
805 return new_bb;
808 /* Compute and return the key (for the heap) of the basic block BB. */
810 static fibheapkey_t
811 bb_to_key (basic_block bb)
813 edge e;
814 edge_iterator ei;
815 int priority = 0;
817 /* Do not start in probably never executed blocks. */
819 if (BB_PARTITION (bb) == BB_COLD_PARTITION
820 || probably_never_executed_bb_p (bb))
821 return BB_FREQ_MAX;
823 /* Prefer blocks whose predecessor is an end of some trace
824 or whose predecessor edge is EDGE_DFS_BACK. */
825 FOR_EACH_EDGE (e, ei, bb->preds)
827 if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
828 || (e->flags & EDGE_DFS_BACK))
830 int edge_freq = EDGE_FREQUENCY (e);
832 if (edge_freq > priority)
833 priority = edge_freq;
837 if (priority)
 838 /* The block with priority should have a significantly lower key. */
839 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
840 return -bb->frequency;
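 /* Keys are negated because fibheap_extract_min returns the block with the
    smallest key first.  A block with priority therefore gets a key of at
    most -100 * BB_FREQ_MAX, which always sorts ahead of a block keyed only
    by -bb->frequency (a value in [-BB_FREQ_MAX, 0]), and both sort ahead of
    the cold blocks keyed BB_FREQ_MAX above.  */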
843 /* Return true when the edge E from basic block BB is better than the temporary
 844 best edge (details are in the function). The probability of edge E is PROB. The
845 frequency of the successor is FREQ. The current best probability is
846 BEST_PROB, the best frequency is BEST_FREQ.
847 The edge is considered to be equivalent when PROB does not differ much from
848 BEST_PROB; similarly for frequency. */
850 static bool
851 better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob,
852 int best_freq, edge cur_best_edge)
854 bool is_better_edge;
856 /* The BEST_* values do not have to be best, but can be a bit smaller than
857 maximum values. */
858 int diff_prob = best_prob / 10;
859 int diff_freq = best_freq / 10;
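  /* With best_prob == 8000, for instance, the band is +/- 800: a PROB above
     8800 is better, below 7200 is worse, and anything in between is treated
     as equivalent, so the decision falls through to the frequency
     comparison (which uses the same 10% band).  */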
861 if (prob > best_prob + diff_prob)
862 /* The edge has higher probability than the temporary best edge. */
863 is_better_edge = true;
864 else if (prob < best_prob - diff_prob)
865 /* The edge has lower probability than the temporary best edge. */
866 is_better_edge = false;
867 else if (freq < best_freq - diff_freq)
868 /* The edge and the temporary best edge have almost equivalent
869 probabilities. The higher frequency of a successor now means
870 that there is another edge going into that successor.
871 This successor has lower frequency so it is better. */
872 is_better_edge = true;
873 else if (freq > best_freq + diff_freq)
874 /* This successor has higher frequency so it is worse. */
875 is_better_edge = false;
876 else if (e->dest->prev_bb == bb)
877 /* The edges have equivalent probabilities and the successors
878 have equivalent frequencies. Select the previous successor. */
879 is_better_edge = true;
880 else
881 is_better_edge = false;
883 /* If we are doing hot/cold partitioning, make sure that we always favor
884 non-crossing edges over crossing edges. */
886 if (!is_better_edge
887 && flag_reorder_blocks_and_partition
888 && cur_best_edge
889 && (cur_best_edge->flags & EDGE_CROSSING)
890 && !(e->flags & EDGE_CROSSING))
891 is_better_edge = true;
893 return is_better_edge;
896 /* Connect traces in array TRACES, N_TRACES is the count of traces. */
898 static void
899 connect_traces (int n_traces, struct trace *traces)
901 int i;
902 int unconnected_hot_trace_count = 0;
903 bool cold_connected = true;
904 bool *connected;
905 bool *cold_traces;
906 int last_trace;
907 int freq_threshold;
908 gcov_type count_threshold;
910 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
911 if (max_entry_count < INT_MAX / 1000)
912 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
913 else
914 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
916 connected = xcalloc (n_traces, sizeof (bool));
917 last_trace = -1;
919 /* If we are partitioning hot/cold basic blocks, mark the cold
920 traces as already connected, to remove them from consideration
921 for connection to the hot traces. After the hot traces have all
922 been connected (determined by "unconnected_hot_trace_count"), we
923 will go back and connect the cold traces. */
925 cold_traces = xcalloc (n_traces, sizeof (bool));
927 if (flag_reorder_blocks_and_partition)
928 for (i = 0; i < n_traces; i++)
930 if (BB_PARTITION (traces[i].first) == BB_COLD_PARTITION)
932 connected[i] = true;
933 cold_traces[i] = true;
934 cold_connected = false;
936 else
937 unconnected_hot_trace_count++;
940 for (i = 0; i < n_traces || !cold_connected ; i++)
942 int t = i;
943 int t2;
944 edge e, best;
945 int best_len;
947 /* If we are partitioning hot/cold basic blocks, check to see
948 if all the hot traces have been connected. If so, go back
949 and mark the cold traces as unconnected so we can connect
950 them up too. Re-set "i" to the first (unconnected) cold
951 trace. Use flag "cold_connected" to make sure we don't do
952 this step more than once. */
954 if (flag_reorder_blocks_and_partition
955 && (i >= n_traces || unconnected_hot_trace_count <= 0)
956 && !cold_connected)
958 int j;
959 int first_cold_trace = -1;
961 for (j = 0; j < n_traces; j++)
962 if (cold_traces[j])
964 connected[j] = false;
965 if (first_cold_trace == -1)
966 first_cold_trace = j;
968 i = t = first_cold_trace;
969 cold_connected = true;
972 if (connected[t])
973 continue;
975 connected[t] = true;
976 if (unconnected_hot_trace_count > 0)
977 unconnected_hot_trace_count--;
979 /* Find the predecessor traces. */
980 for (t2 = t; t2 > 0;)
982 edge_iterator ei;
983 best = NULL;
984 best_len = 0;
985 FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
987 int si = e->src->index;
989 if (e->src != ENTRY_BLOCK_PTR
990 && (e->flags & EDGE_CAN_FALLTHRU)
991 && !(e->flags & EDGE_COMPLEX)
992 && bbd[si].end_of_trace >= 0
993 && !connected[bbd[si].end_of_trace]
994 && (!best
995 || e->probability > best->probability
996 || (e->probability == best->probability
997 && traces[bbd[si].end_of_trace].length > best_len)))
999 best = e;
1000 best_len = traces[bbd[si].end_of_trace].length;
1003 if (best)
1005 best->src->rbi->next = best->dest;
1006 t2 = bbd[best->src->index].end_of_trace;
1007 connected[t2] = true;
1009 if (unconnected_hot_trace_count > 0)
1010 unconnected_hot_trace_count--;
1012 if (dump_file)
1014 fprintf (dump_file, "Connection: %d %d\n",
1015 best->src->index, best->dest->index);
1018 else
1019 break;
1022 if (last_trace >= 0)
1023 traces[last_trace].last->rbi->next = traces[t2].first;
1024 last_trace = t;
1026 /* Find the successor traces. */
1027 while (1)
1029 /* Find the continuation of the chain. */
1030 edge_iterator ei;
1031 best = NULL;
1032 best_len = 0;
1033 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1035 int di = e->dest->index;
1037 if (e->dest != EXIT_BLOCK_PTR
1038 && (e->flags & EDGE_CAN_FALLTHRU)
1039 && !(e->flags & EDGE_COMPLEX)
1040 && bbd[di].start_of_trace >= 0
1041 && !connected[bbd[di].start_of_trace]
1042 && (!best
1043 || e->probability > best->probability
1044 || (e->probability == best->probability
1045 && traces[bbd[di].start_of_trace].length > best_len)))
1047 best = e;
1048 best_len = traces[bbd[di].start_of_trace].length;
1052 if (best)
1054 if (dump_file)
1056 fprintf (dump_file, "Connection: %d %d\n",
1057 best->src->index, best->dest->index);
1059 t = bbd[best->dest->index].start_of_trace;
1060 traces[last_trace].last->rbi->next = traces[t].first;
1061 connected[t] = true;
1062 if (unconnected_hot_trace_count > 0)
1063 unconnected_hot_trace_count--;
1064 last_trace = t;
1066 else
1068 /* Try to connect the traces by duplication of 1 block. */
1069 edge e2;
1070 basic_block next_bb = NULL;
1071 bool try_copy = false;
1073 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1074 if (e->dest != EXIT_BLOCK_PTR
1075 && (e->flags & EDGE_CAN_FALLTHRU)
1076 && !(e->flags & EDGE_COMPLEX)
1077 && (!best || e->probability > best->probability))
1079 edge_iterator ei;
1080 edge best2 = NULL;
1081 int best2_len = 0;
 1083 /* If the destination is the start of a trace which is only
 1084 one block long, then there is no need to search the successor
 1085 blocks of the trace. Accept it. */
1086 if (bbd[e->dest->index].start_of_trace >= 0
1087 && traces[bbd[e->dest->index].start_of_trace].length
1088 == 1)
1090 best = e;
1091 try_copy = true;
1092 continue;
1095 FOR_EACH_EDGE (e2, ei, e->dest->succs)
1097 int di = e2->dest->index;
1099 if (e2->dest == EXIT_BLOCK_PTR
1100 || ((e2->flags & EDGE_CAN_FALLTHRU)
1101 && !(e2->flags & EDGE_COMPLEX)
1102 && bbd[di].start_of_trace >= 0
1103 && !connected[bbd[di].start_of_trace]
1104 && (EDGE_FREQUENCY (e2) >= freq_threshold)
1105 && (e2->count >= count_threshold)
1106 && (!best2
1107 || e2->probability > best2->probability
1108 || (e2->probability == best2->probability
1109 && traces[bbd[di].start_of_trace].length
1110 > best2_len))))
1112 best = e;
1113 best2 = e2;
1114 if (e2->dest != EXIT_BLOCK_PTR)
1115 best2_len = traces[bbd[di].start_of_trace].length;
1116 else
1117 best2_len = INT_MAX;
1118 next_bb = e2->dest;
1119 try_copy = true;
1124 if (flag_reorder_blocks_and_partition)
1125 try_copy = false;
1127 /* Copy tiny blocks always; copy larger blocks only when the
1128 edge is traversed frequently enough. */
1129 if (try_copy
1130 && copy_bb_p (best->dest,
1131 !optimize_size
1132 && EDGE_FREQUENCY (best) >= freq_threshold
1133 && best->count >= count_threshold))
1135 basic_block new_bb;
1137 if (dump_file)
1139 fprintf (dump_file, "Connection: %d %d ",
1140 traces[t].last->index, best->dest->index);
1141 if (!next_bb)
1142 fputc ('\n', dump_file);
1143 else if (next_bb == EXIT_BLOCK_PTR)
1144 fprintf (dump_file, "exit\n");
1145 else
1146 fprintf (dump_file, "%d\n", next_bb->index);
1149 new_bb = copy_bb (best->dest, best, traces[t].last, t);
1150 traces[t].last = new_bb;
1151 if (next_bb && next_bb != EXIT_BLOCK_PTR)
1153 t = bbd[next_bb->index].start_of_trace;
1154 traces[last_trace].last->rbi->next = traces[t].first;
1155 connected[t] = true;
1156 if (unconnected_hot_trace_count > 0)
1157 unconnected_hot_trace_count--;
1158 last_trace = t;
1160 else
1161 break; /* Stop finding the successor traces. */
1163 else
1164 break; /* Stop finding the successor traces. */
1169 if (dump_file)
1171 basic_block bb;
1173 fprintf (dump_file, "Final order:\n");
1174 for (bb = traces[0].first; bb; bb = bb->rbi->next)
1175 fprintf (dump_file, "%d ", bb->index);
1176 fprintf (dump_file, "\n");
1177 fflush (dump_file);
1180 FREE (connected);
1181 FREE (cold_traces);
1184 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
1185 when code size is allowed to grow by duplication. */
1187 static bool
1188 copy_bb_p (basic_block bb, int code_may_grow)
1190 int size = 0;
1191 int max_size = uncond_jump_length;
1192 rtx insn;
1194 if (!bb->frequency)
1195 return false;
1196 if (EDGE_COUNT (bb->preds) < 2)
1197 return false;
1198 if (!can_duplicate_block_p (bb))
1199 return false;
1201 /* Avoid duplicating blocks which have many successors (PR/13430). */
1202 if (EDGE_COUNT (bb->succs) > 8)
1203 return false;
1205 if (code_may_grow && maybe_hot_bb_p (bb))
1206 max_size *= 8;
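  /* When growth is allowed and the block is hot, permit the copy to be up
     to eight times the length of an unconditional jump; otherwise the block
     must not be longer than the jump it would replace.  */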
1208 FOR_BB_INSNS (bb, insn)
1210 if (INSN_P (insn))
1211 size += get_attr_length (insn);
1214 if (size <= max_size)
1215 return true;
1217 if (dump_file)
1219 fprintf (dump_file,
1220 "Block %d can't be copied because its size = %d.\n",
1221 bb->index, size);
1224 return false;
 1227 /* Return the length of an unconditional jump instruction. */
1229 static int
1230 get_uncond_jump_length (void)
1232 rtx label, jump;
1233 int length;
1235 label = emit_label_before (gen_label_rtx (), get_insns ());
1236 jump = emit_jump_insn (gen_jump (label));
1238 length = get_attr_length (jump);
1240 delete_insn (jump);
1241 delete_insn (label);
1242 return length;
1245 static void
1246 add_unlikely_executed_notes (void)
1248 basic_block bb;
1250 /* Add the UNLIKELY_EXECUTED_NOTES to each cold basic block. */
1252 FOR_EACH_BB (bb)
1253 if (BB_PARTITION (bb) == BB_COLD_PARTITION)
1254 mark_bb_for_unlikely_executed_section (bb);
1257 /* Find the basic blocks that are rarely executed and need to be moved to
1258 a separate section of the .o file (to cut down on paging and improve
1259 cache locality). */
1261 static void
1262 find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges,
1263 int *n_crossing_edges,
1264 int *max_idx)
1266 basic_block bb;
1267 bool has_hot_blocks = false;
1268 edge e;
1269 int i;
1270 edge_iterator ei;
1272 /* Mark which partition (hot/cold) each basic block belongs in. */
1274 FOR_EACH_BB (bb)
1276 if (probably_never_executed_bb_p (bb))
1277 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1278 else
1280 BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1281 has_hot_blocks = true;
1285 /* Since all "hot" basic blocks will eventually be scheduled before all
1286 cold basic blocks, make *sure* the real function entry block is in
1287 the hot partition (if there is one). */
1289 if (has_hot_blocks)
1290 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
1291 if (e->dest->index >= 0)
1293 BB_SET_PARTITION (e->dest, BB_HOT_PARTITION);
1294 break;
1297 /* Mark every edge that crosses between sections. */
1299 i = 0;
1300 if (targetm.have_named_sections)
1302 FOR_EACH_BB (bb)
1303 FOR_EACH_EDGE (e, ei, bb->succs)
1305 if (e->src != ENTRY_BLOCK_PTR
1306 && e->dest != EXIT_BLOCK_PTR
1307 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1309 e->flags |= EDGE_CROSSING;
1310 if (i == *max_idx)
1312 *max_idx *= 2;
1313 crossing_edges = xrealloc (crossing_edges,
1314 (*max_idx) * sizeof (edge));
1316 crossing_edges[i++] = e;
1318 else
1319 e->flags &= ~EDGE_CROSSING;
1322 *n_crossing_edges = i;
1325 /* Add NOTE_INSN_UNLIKELY_EXECUTED_CODE to top of basic block. This note
1326 is later used to mark the basic block to be put in the
1327 unlikely-to-be-executed section of the .o file. */
1329 static void
1330 mark_bb_for_unlikely_executed_section (basic_block bb)
1332 rtx cur_insn;
1333 rtx insert_insn = NULL;
1334 rtx new_note;
1336 /* Insert new NOTE immediately after BASIC_BLOCK note. */
1338 for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb));
1339 cur_insn = NEXT_INSN (cur_insn))
1340 if (GET_CODE (cur_insn) == NOTE
1341 && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK)
1343 insert_insn = cur_insn;
1344 break;
1347 /* If basic block does not contain a NOTE_INSN_BASIC_BLOCK, there is
1348 a major problem. */
1349 gcc_assert (insert_insn);
1351 /* Insert note and assign basic block number to it. */
1353 new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
1354 insert_insn);
1355 NOTE_BASIC_BLOCK (new_note) = bb;
 1358 /* If any destination of a crossing edge does not have a label, add a label;
 1359 convert any fall-through crossing edges (for blocks that do not contain
1360 a jump) to unconditional jumps. */
1362 static void
1363 add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
1365 int i;
1366 basic_block src;
1367 basic_block dest;
1368 rtx label;
1369 rtx barrier;
1370 rtx new_jump;
1372 for (i=0; i < n_crossing_edges; i++)
1374 if (crossing_edges[i])
1376 src = crossing_edges[i]->src;
1377 dest = crossing_edges[i]->dest;
1379 /* Make sure dest has a label. */
1381 if (dest && (dest != EXIT_BLOCK_PTR))
1383 label = block_label (dest);
1385 /* Make sure source block ends with a jump. */
1387 if (src && (src != ENTRY_BLOCK_PTR))
1389 if (!JUMP_P (BB_END (src)))
1390 /* bb just falls through. */
 1392 /* Make sure there's only one successor. */
1393 gcc_assert (single_succ_p (src));
1395 /* Find label in dest block. */
1396 label = block_label (dest);
1398 new_jump = emit_jump_insn_after (gen_jump (label),
1399 BB_END (src));
1400 barrier = emit_barrier_after (new_jump);
1401 JUMP_LABEL (new_jump) = label;
1402 LABEL_NUSES (label) += 1;
1403 src->rbi->footer = unlink_insn_chain (barrier, barrier);
1404 /* Mark edge as non-fallthru. */
1405 crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
1406 } /* end: 'if (GET_CODE ... ' */
1407 } /* end: 'if (src && src->index...' */
1408 } /* end: 'if (dest && dest->index...' */
1409 } /* end: 'if (crossing_edges[i]...' */
1410 } /* end for loop */
1413 /* Find any bb's where the fall-through edge is a crossing edge (note that
1414 these bb's must also contain a conditional jump; we've already
1415 dealt with fall-through edges for blocks that didn't have a
1416 conditional jump in the call to add_labels_and_missing_jumps).
 1417 Convert the fall-through edge to a non-crossing edge by inserting a
 1418 new bb to fall through into. The new bb will contain an
1419 unconditional jump (crossing edge) to the original fall through
1420 destination. */
1422 static void
1423 fix_up_fall_thru_edges (void)
1425 basic_block cur_bb;
1426 basic_block new_bb;
1427 edge succ1;
1428 edge succ2;
1429 edge fall_thru;
1430 edge cond_jump = NULL;
1431 edge e;
1432 bool cond_jump_crosses;
1433 int invert_worked;
1434 rtx old_jump;
1435 rtx fall_thru_label;
1436 rtx barrier;
1438 FOR_EACH_BB (cur_bb)
1440 fall_thru = NULL;
1441 if (EDGE_COUNT (cur_bb->succs) > 0)
1442 succ1 = EDGE_SUCC (cur_bb, 0);
1443 else
1444 succ1 = NULL;
1446 if (EDGE_COUNT (cur_bb->succs) > 1)
1447 succ2 = EDGE_SUCC (cur_bb, 1);
1448 else
1449 succ2 = NULL;
1451 /* Find the fall-through edge. */
1453 if (succ1
1454 && (succ1->flags & EDGE_FALLTHRU))
1456 fall_thru = succ1;
1457 cond_jump = succ2;
1459 else if (succ2
1460 && (succ2->flags & EDGE_FALLTHRU))
1462 fall_thru = succ2;
1463 cond_jump = succ1;
1466 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
1468 /* Check to see if the fall-thru edge is a crossing edge. */
1470 if (fall_thru->flags & EDGE_CROSSING)
1472 /* The fall_thru edge crosses; now check the cond jump edge, if
1473 it exists. */
1475 cond_jump_crosses = true;
1476 invert_worked = 0;
1477 old_jump = BB_END (cur_bb);
1479 /* Find the jump instruction, if there is one. */
1481 if (cond_jump)
1483 if (!(cond_jump->flags & EDGE_CROSSING))
1484 cond_jump_crosses = false;
1486 /* We know the fall-thru edge crosses; if the cond
1487 jump edge does NOT cross, and its destination is the
1488 next block in the bb order, invert the jump
1489 (i.e. fix it so the fall thru does not cross and
1490 the cond jump does). */
1492 if (!cond_jump_crosses
1493 && cur_bb->rbi->next == cond_jump->dest)
1495 /* Find label in fall_thru block. We've already added
1496 any missing labels, so there must be one. */
1498 fall_thru_label = block_label (fall_thru->dest);
1500 if (old_jump && fall_thru_label)
1501 invert_worked = invert_jump (old_jump,
1502 fall_thru_label,0);
1503 if (invert_worked)
1505 fall_thru->flags &= ~EDGE_FALLTHRU;
1506 cond_jump->flags |= EDGE_FALLTHRU;
1507 update_br_prob_note (cur_bb);
1508 e = fall_thru;
1509 fall_thru = cond_jump;
1510 cond_jump = e;
1511 cond_jump->flags |= EDGE_CROSSING;
1512 fall_thru->flags &= ~EDGE_CROSSING;
1517 if (cond_jump_crosses || !invert_worked)
1519 /* This is the case where both edges out of the basic
1520 block are crossing edges. Here we will fix up the
1521 fall through edge. The jump edge will be taken care
1522 of later. */
1524 new_bb = force_nonfallthru (fall_thru);
1526 if (new_bb)
1528 new_bb->rbi->next = cur_bb->rbi->next;
1529 cur_bb->rbi->next = new_bb;
1531 /* Make sure new fall-through bb is in same
1532 partition as bb it's falling through from. */
1534 BB_COPY_PARTITION (new_bb, cur_bb);
1535 single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
1538 /* Add barrier after new jump */
1540 if (new_bb)
1542 barrier = emit_barrier_after (BB_END (new_bb));
1543 new_bb->rbi->footer = unlink_insn_chain (barrier,
1544 barrier);
1546 else
1548 barrier = emit_barrier_after (BB_END (cur_bb));
1549 cur_bb->rbi->footer = unlink_insn_chain (barrier,
1550 barrier);
 1558 /* This function checks the destination block of a "crossing jump" to
1559 see if it has any crossing predecessors that begin with a code label
1560 and end with an unconditional jump. If so, it returns that predecessor
1561 block. (This is to avoid creating lots of new basic blocks that all
1562 contain unconditional jumps to the same destination). */
1564 static basic_block
1565 find_jump_block (basic_block jump_dest)
1567 basic_block source_bb = NULL;
1568 edge e;
1569 rtx insn;
1570 edge_iterator ei;
1572 FOR_EACH_EDGE (e, ei, jump_dest->preds)
1573 if (e->flags & EDGE_CROSSING)
1575 basic_block src = e->src;
1577 /* Check each predecessor to see if it has a label, and contains
1578 only one executable instruction, which is an unconditional jump.
1579 If so, we can use it. */
1581 if (LABEL_P (BB_HEAD (src)))
1582 for (insn = BB_HEAD (src);
1583 !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
1584 insn = NEXT_INSN (insn))
1586 if (INSN_P (insn)
1587 && insn == BB_END (src)
1588 && JUMP_P (insn)
1589 && !any_condjump_p (insn))
1591 source_bb = src;
1592 break;
1596 if (source_bb)
1597 break;
1600 return source_bb;
1603 /* Find all BB's with conditional jumps that are crossing edges;
1604 insert a new bb and make the conditional jump branch to the new
 1605 bb instead (make the new bb the same color so the conditional branch won't
 1606 be a 'crossing' edge). Insert an unconditional jump from the
1607 new bb to the original destination of the conditional jump. */
1609 static void
1610 fix_crossing_conditional_branches (void)
1612 basic_block cur_bb;
1613 basic_block new_bb;
1614 basic_block last_bb;
1615 basic_block dest;
1616 basic_block prev_bb;
1617 edge succ1;
1618 edge succ2;
1619 edge crossing_edge;
1620 edge new_edge;
1621 rtx old_jump;
1622 rtx set_src;
1623 rtx old_label = NULL_RTX;
1624 rtx new_label;
1625 rtx new_jump;
1626 rtx barrier;
1628 last_bb = EXIT_BLOCK_PTR->prev_bb;
1630 FOR_EACH_BB (cur_bb)
1632 crossing_edge = NULL;
1633 if (EDGE_COUNT (cur_bb->succs) > 0)
1634 succ1 = EDGE_SUCC (cur_bb, 0);
1635 else
1636 succ1 = NULL;
1638 if (EDGE_COUNT (cur_bb->succs) > 1)
1639 succ2 = EDGE_SUCC (cur_bb, 1);
1640 else
1641 succ2 = NULL;
1643 /* We already took care of fall-through edges, so only one successor
1644 can be a crossing edge. */
1646 if (succ1 && (succ1->flags & EDGE_CROSSING))
1647 crossing_edge = succ1;
1648 else if (succ2 && (succ2->flags & EDGE_CROSSING))
1649 crossing_edge = succ2;
1651 if (crossing_edge)
1653 old_jump = BB_END (cur_bb);
1655 /* Check to make sure the jump instruction is a
1656 conditional jump. */
1658 set_src = NULL_RTX;
1660 if (any_condjump_p (old_jump))
1662 if (GET_CODE (PATTERN (old_jump)) == SET)
1663 set_src = SET_SRC (PATTERN (old_jump));
1664 else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
1666 set_src = XVECEXP (PATTERN (old_jump), 0,0);
1667 if (GET_CODE (set_src) == SET)
1668 set_src = SET_SRC (set_src);
1669 else
1670 set_src = NULL_RTX;
1674 if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
1676 if (GET_CODE (XEXP (set_src, 1)) == PC)
1677 old_label = XEXP (set_src, 2);
1678 else if (GET_CODE (XEXP (set_src, 2)) == PC)
1679 old_label = XEXP (set_src, 1);
 1681 /* Check to see if a new bb for jumping to that dest has
1682 already been created; if so, use it; if not, create
1683 a new one. */
1685 new_bb = find_jump_block (crossing_edge->dest);
1687 if (new_bb)
1688 new_label = block_label (new_bb);
1689 else
1691 /* Create new basic block to be dest for
1692 conditional jump. */
1694 new_bb = create_basic_block (NULL, NULL, last_bb);
1695 new_bb->rbi->next = last_bb->rbi->next;
1696 last_bb->rbi->next = new_bb;
1697 prev_bb = last_bb;
1698 last_bb = new_bb;
1700 /* Update register liveness information. */
1702 new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
1703 new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
1704 COPY_REG_SET (new_bb->global_live_at_end,
1705 prev_bb->global_live_at_end);
1706 COPY_REG_SET (new_bb->global_live_at_start,
1707 prev_bb->global_live_at_end);
1709 /* Put appropriate instructions in new bb. */
1711 new_label = gen_label_rtx ();
1712 emit_label_before (new_label, BB_HEAD (new_bb));
1713 BB_HEAD (new_bb) = new_label;
1715 if (GET_CODE (old_label) == LABEL_REF)
1717 old_label = JUMP_LABEL (old_jump);
1718 new_jump = emit_jump_insn_after (gen_jump
1719 (old_label),
1720 BB_END (new_bb));
1722 else
1724 gcc_assert (HAVE_return
1725 && GET_CODE (old_label) == RETURN);
1726 new_jump = emit_jump_insn_after (gen_return (),
1727 BB_END (new_bb));
1730 barrier = emit_barrier_after (new_jump);
1731 JUMP_LABEL (new_jump) = old_label;
1732 new_bb->rbi->footer = unlink_insn_chain (barrier,
1733 barrier);
1735 /* Make sure new bb is in same partition as source
1736 of conditional branch. */
1737 BB_COPY_PARTITION (new_bb, cur_bb);
1740 /* Make old jump branch to new bb. */
1742 redirect_jump (old_jump, new_label, 0);
1744 /* Remove crossing_edge as predecessor of 'dest'. */
1746 dest = crossing_edge->dest;
1748 redirect_edge_succ (crossing_edge, new_bb);
1750 /* Make a new edge from new_bb to old dest; new edge
1751 will be a successor for new_bb and a predecessor
1752 for 'dest'. */
1754 if (EDGE_COUNT (new_bb->succs) == 0)
1755 new_edge = make_edge (new_bb, dest, 0);
1756 else
1757 new_edge = EDGE_SUCC (new_bb, 0);
1759 crossing_edge->flags &= ~EDGE_CROSSING;
1760 new_edge->flags |= EDGE_CROSSING;
1766 /* Find any unconditional branches that cross between hot and cold
1767 sections. Convert them into indirect jumps instead. */
1769 static void
1770 fix_crossing_unconditional_branches (void)
1772 basic_block cur_bb;
1773 rtx last_insn;
1774 rtx label;
1775 rtx label_addr;
1776 rtx indirect_jump_sequence;
1777 rtx jump_insn = NULL_RTX;
1778 rtx new_reg;
1779 rtx cur_insn;
1780 edge succ;
1782 FOR_EACH_BB (cur_bb)
1784 last_insn = BB_END (cur_bb);
1785 succ = EDGE_SUCC (cur_bb, 0);
1787 /* Check to see if bb ends in a crossing (unconditional) jump. At
1788 this point, no crossing jumps should be conditional. */
1790 if (JUMP_P (last_insn)
1791 && (succ->flags & EDGE_CROSSING))
1793 rtx label2, table;
1795 gcc_assert (!any_condjump_p (last_insn));
1797 /* Make sure the jump is not already an indirect or table jump. */
1799 if (!computed_jump_p (last_insn)
1800 && !tablejump_p (last_insn, &label2, &table))
1802 /* We have found a "crossing" unconditional branch. Now
1803 we must convert it to an indirect jump. First create
 1804 a reference to the label, as the target for the jump. */
1806 label = JUMP_LABEL (last_insn);
1807 label_addr = gen_rtx_LABEL_REF (Pmode, label);
1808 LABEL_NUSES (label) += 1;
1810 /* Get a register to use for the indirect jump. */
1812 new_reg = gen_reg_rtx (Pmode);
 1814 /* Generate the indirect jump sequence. */
1816 start_sequence ();
1817 emit_move_insn (new_reg, label_addr);
1818 emit_indirect_jump (new_reg);
1819 indirect_jump_sequence = get_insns ();
1820 end_sequence ();
1822 /* Make sure every instruction in the new jump sequence has
1823 its basic block set to be cur_bb. */
1825 for (cur_insn = indirect_jump_sequence; cur_insn;
1826 cur_insn = NEXT_INSN (cur_insn))
1828 BLOCK_FOR_INSN (cur_insn) = cur_bb;
1829 if (JUMP_P (cur_insn))
1830 jump_insn = cur_insn;
1833 /* Insert the new (indirect) jump sequence immediately before
1834 the unconditional jump, then delete the unconditional jump. */
1836 emit_insn_before (indirect_jump_sequence, last_insn);
1837 delete_insn (last_insn);
1839 /* Make BB_END for cur_bb be the jump instruction (NOT the
1840 barrier instruction at the end of the sequence...). */
1842 BB_END (cur_bb) = jump_insn;
1848 /* Add REG_CROSSING_JUMP note to all crossing jump insns. */
1850 static void
1851 add_reg_crossing_jump_notes (void)
1853 basic_block bb;
1854 edge e;
1855 edge_iterator ei;
1857 FOR_EACH_BB (bb)
1858 FOR_EACH_EDGE (e, ei, bb->succs)
1859 if ((e->flags & EDGE_CROSSING)
1860 && JUMP_P (BB_END (e->src)))
1861 REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
1862 NULL_RTX,
1863 REG_NOTES (BB_END
1864 (e->src)));
1867 /* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be
1868 put in a separate section of the .o file, to reduce paging and
1869 improve cache performance (hopefully). This can result in bits of
1870 code from the same function being widely separated in the .o file.
1871 However this is not obvious to the current bb structure. Therefore
 1872 we must take care to ensure that: 1) there are no fall_thru edges
 1873 that cross between sections; 2) for those architectures which
 1874 have "short" conditional branches, all conditional branches that
 1875 attempt to cross between sections are converted to unconditional
 1876 branches; and 3) for those architectures which have "short"
1877 unconditional branches, all unconditional branches that attempt
1878 to cross between sections are converted to indirect jumps.
1880 The code for fixing up fall_thru edges that cross between hot and
1881 cold basic blocks does so by creating new basic blocks containing
1882 unconditional branches to the appropriate label in the "other"
1883 section. The new basic block is then put in the same (hot or cold)
1884 section as the original conditional branch, and the fall_thru edge
1885 is modified to fall into the new basic block instead. By adding
1886 this level of indirection we end up with only unconditional branches
1887 crossing between hot and cold sections.
1889 Conditional branches are dealt with by adding a level of indirection.
1890 A new basic block is added in the same (hot/cold) section as the
1891 conditional branch, and the conditional branch is retargeted to the
1892 new basic block. The new basic block contains an unconditional branch
1893 to the original target of the conditional branch (in the other section).
1895 Unconditional branches are dealt with by converting them into
1896 indirect jumps. */
1898 static void
1899 fix_edges_for_rarely_executed_code (edge *crossing_edges,
1900 int n_crossing_edges)
1902 /* Make sure the source of any crossing edge ends in a jump and the
1903 destination of any crossing edge has a label. */
1905 add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
1907 /* Convert all crossing fall_thru edges to non-crossing fall
1908 thrus to unconditional jumps (that jump to the original fall
1909 thru dest). */
1911 fix_up_fall_thru_edges ();
1913 /* Only do the parts necessary for writing separate sections if
1914 the target architecture has the ability to write separate sections
1915 (i.e. it has named sections). Otherwise, the hot/cold partitioning
1916 information will be used when reordering blocks to try to put all
1917 the hot blocks together, then all the cold blocks, but no actual
1918 section partitioning will be done. */
1920 if (targetm.have_named_sections)
1922 /* If the architecture does not have conditional branches that can
1923 span all of memory, convert crossing conditional branches into
1924 crossing unconditional branches. */
1926 if (!HAS_LONG_COND_BRANCH)
1927 fix_crossing_conditional_branches ();
1929 /* If the architecture does not have unconditional branches that
1930 can span all of memory, convert crossing unconditional branches
1931 into indirect jumps. Since adding an indirect jump also adds
1932 a new register usage, update the register usage information as
1933 well. */
1935 if (!HAS_LONG_UNCOND_BRANCH)
1937 fix_crossing_unconditional_branches ();
1938 reg_scan (get_insns(), max_reg_num ());
1941 add_reg_crossing_jump_notes ();
1945 /* Reorder basic blocks. The main entry point to this file. FLAGS is
1946 the set of flags to pass to cfg_layout_initialize(). */
1948 void
1949 reorder_basic_blocks (unsigned int flags)
1951 int n_traces;
1952 int i;
1953 struct trace *traces;
1955 if (n_basic_blocks <= 1)
1956 return;
1958 if (targetm.cannot_modify_jumps_p ())
1959 return;
1961 timevar_push (TV_REORDER_BLOCKS);
1963 cfg_layout_initialize (flags);
1965 set_edge_can_fallthru_flag ();
1966 mark_dfs_back_edges ();
1968 /* We are estimating the length of uncond jump insn only once since the code
1969 for getting the insn length always returns the minimal length now. */
1970 if (uncond_jump_length == 0)
1971 uncond_jump_length = get_uncond_jump_length ();
1973 /* We need to know some information for each basic block. */
1974 array_size = GET_ARRAY_SIZE (last_basic_block);
1975 bbd = xmalloc (array_size * sizeof (bbro_basic_block_data));
1976 for (i = 0; i < array_size; i++)
1978 bbd[i].start_of_trace = -1;
1979 bbd[i].end_of_trace = -1;
1980 bbd[i].heap = NULL;
1981 bbd[i].node = NULL;
1984 traces = xmalloc (n_basic_blocks * sizeof (struct trace));
1985 n_traces = 0;
1986 find_traces (&n_traces, traces);
1987 connect_traces (n_traces, traces);
1988 FREE (traces);
1989 FREE (bbd);
1991 if (dump_file)
1992 dump_flow_info (dump_file);
1994 if (flag_reorder_blocks_and_partition
1995 && targetm.have_named_sections)
1996 add_unlikely_executed_notes ();
1998 cfg_layout_finalize ();
2000 timevar_pop (TV_REORDER_BLOCKS);
 2003 /* Duplicate the blocks containing computed gotos. This basically unfactors
 2004 computed gotos that were factored early on in the compilation process to
 2005 speed up edge-based data flow. We used to not unfactor them again, which
 2006 can seriously pessimize code with many computed jumps in the source code,
 2007 such as interpreters. See e.g. PR15242. */
2009 void
2010 duplicate_computed_gotos (void)
2012 basic_block bb, new_bb;
2013 bitmap candidates;
2014 int max_size;
2016 if (n_basic_blocks <= 1)
2017 return;
2019 if (targetm.cannot_modify_jumps_p ())
2020 return;
2022 timevar_push (TV_REORDER_BLOCKS);
2024 cfg_layout_initialize (0);
2026 /* We are estimating the length of uncond jump insn only once
2027 since the code for getting the insn length always returns
2028 the minimal length now. */
2029 if (uncond_jump_length == 0)
2030 uncond_jump_length = get_uncond_jump_length ();
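 /* A block that ends in a computed jump is a duplication candidate only if
    its total instruction length is at most PARAM_MAX_GOTO_DUPLICATION_INSNS
    times the length of an unconditional jump (checked below).  */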
2032 max_size = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
2033 candidates = BITMAP_ALLOC (NULL);
2035 /* Build the reorder chain for the original order of blocks.
2036 Look for a computed jump while we are at it. */
2037 FOR_EACH_BB (bb)
2039 if (bb->next_bb != EXIT_BLOCK_PTR)
2040 bb->rbi->next = bb->next_bb;
2042 /* If the block ends in a computed jump and it is small enough,
2043 make it a candidate for duplication. */
2044 if (computed_jump_p (BB_END (bb)))
2046 rtx insn;
2047 int size = 0;
2049 FOR_BB_INSNS (bb, insn)
2051 if (INSN_P (insn))
2053 /* If the insn isn't copyable, don't duplicate
2054 the block. */
2055 if (targetm.cannot_copy_insn_p
2056 && targetm.cannot_copy_insn_p (insn))
2058 size = max_size + 1;
2059 break;
2061 size += get_attr_length (insn);
2063 if (size > max_size)
2064 break;
2067 if (size <= max_size)
2068 bitmap_set_bit (candidates, bb->index);
2072 /* Nothing to do if there is no computed jump here. */
2073 if (bitmap_empty_p (candidates))
2074 goto done;
2076 /* Duplicate computed gotos. */
2077 FOR_EACH_BB (bb)
2079 if (bb->rbi->visited)
2080 continue;
2082 bb->rbi->visited = 1;
2084 /* BB must have one outgoing edge. That edge must not lead to
2085 the exit block or the next block.
2086 The destination must have more than one predecessor. */
2087 if (!single_succ_p (bb)
2088 || single_succ (bb) == EXIT_BLOCK_PTR
2089 || single_succ (bb) == bb->next_bb
2090 || single_pred_p (single_succ (bb)))
2091 continue;
2093 /* The successor block has to be a duplication candidate. */
2094 if (!bitmap_bit_p (candidates, single_succ (bb)->index))
2095 continue;
2097 new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb));
2098 new_bb->rbi->next = bb->rbi->next;
2099 bb->rbi->next = new_bb;
2100 new_bb->rbi->visited = 1;
2103 done:
2104 cfg_layout_finalize ();
2106 BITMAP_FREE (candidates);
2108 timevar_pop (TV_REORDER_BLOCKS);
2111 /* This function is the main 'entrance' for the optimization that
2112 partitions hot and cold basic blocks into separate sections of the
2113 .o file (to improve performance and cache locality). Ideally it
2114 would be called after all optimizations that rearrange the CFG have
2115 been called. However part of this optimization may introduce new
2116 register usage, so it must be called before register allocation has
2117 occurred. This means that this optimization is actually called
2118 well before the optimization that reorders basic blocks (see
2119 function above).
2121 This optimization checks the feedback information to determine
2122 which basic blocks are hot/cold and causes reorder_basic_blocks to
2123 add NOTE_INSN_UNLIKELY_EXECUTED_CODE to non-hot basic blocks. The
2124 presence or absence of this note is later used for writing out
2125 sections in the .o file. Because hot and cold sections can be
2126 arbitrarily large (within the bounds of memory), far beyond the
2127 size of a single function, it is necessary to fix up all edges that
2128 cross section boundaries, to make sure the instructions used can
2129 actually span the required distance. The fixes are described
2130 below.
2132 Fall-through edges must be changed into jumps; it is not safe or
2133 legal to fall through across a section boundary. Whenever a
2134 fall-through edge crossing a section boundary is encountered, a new
2135 basic block is inserted (in the same section as the fall-through
2136 source), and the fall through edge is redirected to the new basic
2137 block. The new basic block contains an unconditional jump to the
2138 original fall-through target. (If the unconditional jump is
2139 insufficient to cross section boundaries, that is dealt with a
2140 little later, see below).
2142 In order to deal with architectures that have short conditional
2143 branches (which cannot span all of memory) we take any conditional
2144 jump that attempts to cross a section boundary and add a level of
2145 indirection: it becomes a conditional jump to a new basic block, in
2146 the same section. The new basic block contains an unconditional
2147 jump to the original target, in the other section.
2149 For those architectures whose unconditional branch is also
2150 incapable of reaching all of memory, those unconditional jumps are
2151 converted into indirect jumps, through a register.
2153 IMPORTANT NOTE: This optimization causes some messy interactions
2154 with the cfg cleanup optimizations; those optimizations want to
2155 merge blocks wherever possible, and to collapse indirect jump
2156 sequences (change "A jumps to B jumps to C" directly into "A jumps
2157 to C"). Those optimizations can undo the jump fixes that
2158 partitioning is required to make (see above), in order to ensure
2159 that jumps attempting to cross section boundaries are really able
2160 to cover whatever distance the jump requires (on many architectures
2161 conditional or unconditional jumps are not able to reach all of
2162 memory). Therefore tests have to be inserted into each such
2163 optimization to make sure that it does not undo stuff necessary to
2164 cross partition boundaries. This would be much less of a problem
2165 if we could perform this optimization later in the compilation, but
2166 unfortunately the fact that we may need to create indirect jumps
2167 (through registers) requires that this optimization be performed
2168 before register allocation. */
2170 void
2171 partition_hot_cold_basic_blocks (void)
2173 basic_block cur_bb;
2174 edge *crossing_edges;
2175 int n_crossing_edges;
2176 int max_edges = 2 * last_basic_block;
2178 if (n_basic_blocks <= 1)
2179 return;
2181 crossing_edges = xcalloc (max_edges, sizeof (edge));
2183 cfg_layout_initialize (0);
2185 FOR_EACH_BB (cur_bb)
2186 if (cur_bb->index >= 0
2187 && cur_bb->next_bb->index >= 0)
2188 cur_bb->rbi->next = cur_bb->next_bb;
2190 find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
2191 &n_crossing_edges,
2192 &max_edges);
2194 if (n_crossing_edges > 0)
2195 fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
2197 free (crossing_edges);
2199 cfg_layout_finalize();