1 /* Basic block reordering routines for the GNU compiler.
2 Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
22 /* This (greedy) algorithm constructs traces in several rounds.
23 The construction starts from "seeds". The seed for the first round
24 is the entry point of the function. When there is more than one seed,
25 the one with the lowest key in the heap is selected first
26 (see function bb_to_key). Then the algorithm repeatedly adds the most
27 probable successor to the end of a trace. Finally it connects the traces.
29 There are two parameters: Branch Threshold and Exec Threshold.
30 If the probability of the edge to a successor of the current basic block is
31 lower than Branch Threshold, or the frequency of the successor is lower than
32 Exec Threshold, the successor will be a seed in one of the next rounds.
33 Each round has these parameters lower than the previous one.
34 The last round has to have these parameters set to zero
35 so that the remaining blocks are picked up.
37 The algorithm selects the most probable successor from all unvisited
38 successors and successors that have been added to this trace.
39 The other successors (that have not been "sent" to the next round) become
40 other seeds for this round, and the secondary traces start in them.
41 If the successor has not been visited in this trace it is added to the trace
42 (however, there is some heuristic for simple branches).
43 If the successor has been visited in this trace, a loop has been found.
44 If the loop has many iterations the loop is rotated so that the
45 source block of the most probable edge going out of the loop
46 is the last block of the trace.
47 If the loop has few iterations and there is no edge from the last block of
48 the loop going out of the loop, the loop header is duplicated.
49 Finally, the construction of the trace is terminated.
51 When connecting traces it first checks whether there is an edge from the
52 last block of one trace to the first block of another trace.
53 When there are still some unconnected traces it checks whether there exists
54 a basic block BB such that BB is a successor of the last bb of one trace
55 and BB is a predecessor of the first block of another trace. In this case,
56 BB is duplicated and the traces are connected through this duplicate.
57 The remaining traces are simply connected, so there will be a jump to the
58 beginning of each remaining trace.
61 References:
63 "Software Trace Cache"
64 A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
65 http://citeseer.nj.nec.com/15361.html */
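/* As a small illustration (hypothetical CFG, not taken from any test case):
   suppose the hot path of a function is 1 -> 2 -> 4 -> 5 and block 3 is the
   unlikely arm of the branch in block 2.  The first round starts from the
   seed 1 and repeatedly follows the most probable suitable successor,
   producing the trace 1 2 4 5; block 3 falls below the branch/exec
   thresholds and is pushed to a later round, where it becomes the seed of
   its own (cold) trace.  connect_traces then glues the traces together,
   preferring existing fall-through edges and duplicating a small block in
   between when that avoids an extra jump.  */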
69 #include "config.h"
70 #include "system.h"
71 #include "coretypes.h"
72 #include "tm.h"
73 #include "rtl.h"
74 #include "regs.h"
75 #include "flags.h"
76 #include "timevar.h"
77 #include "output.h"
78 #include "cfglayout.h"
79 #include "fibheap.h"
80 #include "target.h"
81 #include "function.h"
82 #include "tm_p.h"
83 #include "obstack.h"
84 #include "expr.h"
85 #include "params.h"
86 #include "toplev.h"
87 #include "tree-pass.h"
88 #include "df.h"
90 #ifndef HAVE_conditional_execution
91 #define HAVE_conditional_execution 0
92 #endif
94 /* The number of rounds. In most cases there will only be 4 rounds, but
95 when partitioning hot and cold basic blocks into separate sections of
96 the .o file there will be an extra round. */
97 #define N_ROUNDS 5
99 /* Stubs in case we don't have a return insn.
100 We have to check at runtime too, not only at compile time. */
102 #ifndef HAVE_return
103 #define HAVE_return 0
104 #define gen_return() NULL_RTX
105 #endif
108 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
109 static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
111 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
112 static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
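/* For example, with the values above the first round only extends a trace
   along an edge whose probability is at least 40% of REG_BR_PROB_BASE and
   which is executed at least 50% as often as the hottest entry block
   (profile counts are checked similarly); blocks that fall below the exec
   threshold are likewise deferred as seeds for a later round.  The final
   rounds use zero thresholds so that every remaining block is eventually
   placed in some trace.  */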
114 /* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
115 block the edge destination is not duplicated while connecting traces. */
116 #define DUPLICATION_THRESHOLD 100
118 /* Length of unconditional jump instruction. */
119 static int uncond_jump_length;
121 /* Structure to hold needed information for each basic block. */
122 typedef struct bbro_basic_block_data_def
124 /* Which trace is the bb start of (-1 means it is not a start of a trace). */
125 int start_of_trace;
127 /* Which trace is the bb end of (-1 means it is not an end of a trace). */
128 int end_of_trace;
130 /* Which trace is the bb in? */
131 int in_trace;
133 /* Which heap is BB in (if any)? */
134 fibheap_t heap;
136 /* Which heap node is BB in (if any)? */
137 fibnode_t node;
138 } bbro_basic_block_data;
140 /* The current size of the following dynamic array. */
141 static int array_size;
143 /* The array which holds needed information for basic blocks. */
144 static bbro_basic_block_data *bbd;
146 /* To avoid frequent reallocation the size of the arrays is greater than needed;
147 the number of elements is (not less than) 1.25 * size_wanted. */
148 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
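/* For example, GET_ARRAY_SIZE (10) = ((10 / 4) + 1) * 5 = 15 >= 12.5 and
   GET_ARRAY_SIZE (100) = 130 >= 125.  Because of the "+ 1" the result is
   strictly greater than 1.25 * X even after the truncating division.  */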
150 /* Free the memory and set the pointer to NULL. */
151 #define FREE(P) (gcc_assert (P), free (P), P = 0)
153 /* Structure for holding information about a trace. */
154 struct trace
156 /* First and last basic block of the trace. */
157 basic_block first, last;
159 /* The round of the STC creation which this trace was found in. */
160 int round;
162 /* The length (i.e. the number of basic blocks) of the trace. */
163 int length;
166 /* Maximum frequency and count of one of the entry blocks. */
167 static int max_entry_frequency;
168 static gcov_type max_entry_count;
170 /* Local function prototypes. */
171 static void find_traces (int *, struct trace *);
172 static basic_block rotate_loop (edge, struct trace *, int);
173 static void mark_bb_visited (basic_block, int);
174 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
175 int, fibheap_t *, int);
176 static basic_block copy_bb (basic_block, edge, basic_block, int);
177 static fibheapkey_t bb_to_key (basic_block);
178 static bool better_edge_p (basic_block, edge, int, int, int, int, edge);
179 static void connect_traces (int, struct trace *);
180 static bool copy_bb_p (basic_block, int);
181 static int get_uncond_jump_length (void);
182 static bool push_to_next_round_p (basic_block, int, int, int, gcov_type);
183 static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *,
184 int *,
185 int *);
186 static void add_labels_and_missing_jumps (edge *, int);
187 static void add_reg_crossing_jump_notes (void);
188 static void fix_up_fall_thru_edges (void);
189 static void fix_edges_for_rarely_executed_code (edge *, int);
190 static void fix_crossing_conditional_branches (void);
191 static void fix_crossing_unconditional_branches (void);
193 /* Check to see if bb should be pushed into the next round of trace
194 collections or not. Reasons for pushing the block forward are 1).
195 If the block is cold, we are doing partitioning, and there will be
196 another round (cold partition blocks are not supposed to be
197 collected into traces until the very last round); or 2). There will
198 be another round, and the basic block is not "hot enough" for the
199 current round of trace collection. */
201 static bool
202 push_to_next_round_p (basic_block bb, int round, int number_of_rounds,
203 int exec_th, gcov_type count_th)
205 bool there_exists_another_round;
206 bool block_not_hot_enough;
208 there_exists_another_round = round < number_of_rounds - 1;
210 block_not_hot_enough = (bb->frequency < exec_th
211 || bb->count < count_th
212 || probably_never_executed_bb_p (bb));
214 if (there_exists_another_round
215 && block_not_hot_enough)
216 return true;
217 else
218 return false;
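/* For example, with the thresholds defined above, a block that runs at 30%
   of the frequency of the hottest entry block is pushed out of the first
   round (whose exec threshold is 50%) but, assuming its profile count and
   probably_never_executed_bb_p also pass, it is kept in the second round
   (threshold 20%).  In the last round there is no further round to push to,
   so every remaining block is accepted.  */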
221 /* Find the traces for Software Trace Cache. Chain each trace through
222 the basic blocks' aux field. Store the number of traces in N_TRACES and the
223 description of the traces in TRACES. */
225 static void
226 find_traces (int *n_traces, struct trace *traces)
228 int i;
229 int number_of_rounds;
230 edge e;
231 edge_iterator ei;
232 fibheap_t heap;
234 /* Add one extra round of trace collection when partitioning hot/cold
235 basic blocks into separate sections. The last round is for all the
236 cold blocks (and ONLY the cold blocks). */
238 number_of_rounds = N_ROUNDS - 1;
240 /* Insert entry points of function into heap. */
241 heap = fibheap_new ();
242 max_entry_frequency = 0;
243 max_entry_count = 0;
244 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
246 bbd[e->dest->index].heap = heap;
247 bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
248 e->dest);
249 if (e->dest->frequency > max_entry_frequency)
250 max_entry_frequency = e->dest->frequency;
251 if (e->dest->count > max_entry_count)
252 max_entry_count = e->dest->count;
255 /* Find the traces. */
256 for (i = 0; i < number_of_rounds; i++)
258 gcov_type count_threshold;
260 if (dump_file)
261 fprintf (dump_file, "STC - round %d\n", i + 1);
263 if (max_entry_count < INT_MAX / 1000)
264 count_threshold = max_entry_count * exec_threshold[i] / 1000;
265 else
266 count_threshold = max_entry_count / 1000 * exec_threshold[i];
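/* Multiplying first keeps full precision; dividing first keeps the
   intermediate value small when the profile counts are huge.  For example,
   with max_entry_count of three billion and exec_threshold[i] == 200, the
   divide-first form computes 3000000 * 200 = 600000000, losing at most the
   remainder of the division by 1000.  */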
268 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
269 max_entry_frequency * exec_threshold[i] / 1000,
270 count_threshold, traces, n_traces, i, &heap,
271 number_of_rounds);
273 fibheap_delete (heap);
275 if (dump_file)
277 for (i = 0; i < *n_traces; i++)
279 basic_block bb;
280 fprintf (dump_file, "Trace %d (round %d): ", i + 1,
281 traces[i].round + 1);
282 for (bb = traces[i].first; bb != traces[i].last; bb = bb->aux)
283 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
284 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
286 fflush (dump_file);
290 /* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE
291 (with sequential number TRACE_N). */
293 static basic_block
294 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
296 basic_block bb;
298 /* Information about the best end (end after rotation) of the loop. */
299 basic_block best_bb = NULL;
300 edge best_edge = NULL;
301 int best_freq = -1;
302 gcov_type best_count = -1;
303 /* The best edge is preferred when its destination is not visited yet
304 or is a start block of some trace. */
305 bool is_preferred = false;
307 /* Find the most frequent edge that goes out from current trace. */
308 bb = back_edge->dest;
311 edge e;
312 edge_iterator ei;
314 FOR_EACH_EDGE (e, ei, bb->succs)
315 if (e->dest != EXIT_BLOCK_PTR
316 && e->dest->il.rtl->visited != trace_n
317 && (e->flags & EDGE_CAN_FALLTHRU)
318 && !(e->flags & EDGE_COMPLEX))
320 if (is_preferred)
322 /* The best edge is preferred. */
323 if (!e->dest->il.rtl->visited
324 || bbd[e->dest->index].start_of_trace >= 0)
326 /* The current edge E is also preferred. */
327 int freq = EDGE_FREQUENCY (e);
328 if (freq > best_freq || e->count > best_count)
330 best_freq = freq;
331 best_count = e->count;
332 best_edge = e;
333 best_bb = bb;
337 else
339 if (!e->dest->il.rtl->visited
340 || bbd[e->dest->index].start_of_trace >= 0)
342 /* The current edge E is preferred. */
343 is_preferred = true;
344 best_freq = EDGE_FREQUENCY (e);
345 best_count = e->count;
346 best_edge = e;
347 best_bb = bb;
349 else
351 int freq = EDGE_FREQUENCY (e);
352 if (!best_edge || freq > best_freq || e->count > best_count)
354 best_freq = freq;
355 best_count = e->count;
356 best_edge = e;
357 best_bb = bb;
362 bb = bb->aux;
364 while (bb != back_edge->dest);
366 if (best_bb)
368 /* Rotate the loop so that the BEST_EDGE goes out from the last block of
369 the trace. */
370 if (back_edge->dest == trace->first)
372 trace->first = best_bb->aux;
374 else
376 basic_block prev_bb;
378 for (prev_bb = trace->first;
379 prev_bb->aux != back_edge->dest;
380 prev_bb = prev_bb->aux)
382 prev_bb->aux = best_bb->aux;
384 /* Try to get rid of uncond jump to cond jump. */
385 if (single_succ_p (prev_bb))
387 basic_block header = single_succ (prev_bb);
389 /* Duplicate HEADER if it is a small block containing cond jump
390 in the end. */
391 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
392 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
393 NULL_RTX))
394 copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
398 else
400 /* We have not found suitable loop tail so do no rotation. */
401 best_bb = back_edge->src;
403 best_bb->aux = NULL;
404 return best_bb;
407 /* This function marks BB that it was visited in trace number TRACE. */
409 static void
410 mark_bb_visited (basic_block bb, int trace)
412 bb->il.rtl->visited = trace;
413 if (bbd[bb->index].heap)
415 fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
416 bbd[bb->index].heap = NULL;
417 bbd[bb->index].node = NULL;
421 /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH, i.e. do
422 not include into traces basic blocks whose probability is lower than BRANCH_TH
423 or whose frequency is lower than EXEC_TH (or whose count is lower than
424 COUNT_TH). It stores the new traces into TRACES and modifies the number of
425 traces *N_TRACES. Sets the round (which the trace belongs to) to ROUND. It
426 expects that starting basic blocks are in *HEAP and at the end it deletes
427 *HEAP and stores starting points for the next round into new *HEAP. */
429 static void
430 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
431 struct trace *traces, int *n_traces, int round,
432 fibheap_t *heap, int number_of_rounds)
434 /* Heap for discarded basic blocks which are possible starting points for
435 the next round. */
436 fibheap_t new_heap = fibheap_new ();
438 while (!fibheap_empty (*heap))
440 basic_block bb;
441 struct trace *trace;
442 edge best_edge, e;
443 fibheapkey_t key;
444 edge_iterator ei;
446 bb = fibheap_extract_min (*heap);
447 bbd[bb->index].heap = NULL;
448 bbd[bb->index].node = NULL;
450 if (dump_file)
451 fprintf (dump_file, "Getting bb %d\n", bb->index);
453 /* If the BB's frequency is too low send BB to the next round. When
454 partitioning hot/cold blocks into separate sections, make sure all
455 the cold blocks (and ONLY the cold blocks) go into the (extra) final
456 round. */
458 if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
459 count_th))
461 int key = bb_to_key (bb);
462 bbd[bb->index].heap = new_heap;
463 bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
465 if (dump_file)
466 fprintf (dump_file,
467 " Possible start point of next round: %d (key: %d)\n",
468 bb->index, key);
469 continue;
472 trace = traces + *n_traces;
473 trace->first = bb;
474 trace->round = round;
475 trace->length = 0;
476 bbd[bb->index].in_trace = *n_traces;
477 (*n_traces)++;
481 int prob, freq;
482 bool ends_in_call;
484 /* The probability and frequency of the best edge. */
485 int best_prob = INT_MIN / 2;
486 int best_freq = INT_MIN / 2;
488 best_edge = NULL;
489 mark_bb_visited (bb, *n_traces);
490 trace->length++;
492 if (dump_file)
493 fprintf (dump_file, "Basic block %d was visited in trace %d\n",
494 bb->index, *n_traces - 1);
496 ends_in_call = block_ends_with_call_p (bb);
498 /* Select the successor that will be placed after BB. */
499 FOR_EACH_EDGE (e, ei, bb->succs)
501 gcc_assert (!(e->flags & EDGE_FAKE));
503 if (e->dest == EXIT_BLOCK_PTR)
504 continue;
506 if (e->dest->il.rtl->visited
507 && e->dest->il.rtl->visited != *n_traces)
508 continue;
510 if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
511 continue;
513 prob = e->probability;
514 freq = e->dest->frequency;
516 /* The only sensible preference for a call instruction is the
517 fallthru edge. Don't bother selecting anything else. */
518 if (ends_in_call)
520 if (e->flags & EDGE_CAN_FALLTHRU)
522 best_edge = e;
523 best_prob = prob;
524 best_freq = freq;
526 continue;
529 /* Edge that cannot be fallthru or improbable or infrequent
530 successor (i.e. it is unsuitable successor). */
531 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
532 || prob < branch_th || EDGE_FREQUENCY (e) < exec_th
533 || e->count < count_th)
534 continue;
536 /* If partitioning hot/cold basic blocks, don't consider edges
537 that cross section boundaries. */
539 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
540 best_edge))
542 best_edge = e;
543 best_prob = prob;
544 best_freq = freq;
548 /* If the best destination has multiple predecessors, and can be
549 duplicated cheaper than a jump, don't allow it to be added
550 to a trace. We'll duplicate it when connecting traces. */
551 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
552 && copy_bb_p (best_edge->dest, 0))
553 best_edge = NULL;
555 /* Add all non-selected successors to the heaps. */
556 FOR_EACH_EDGE (e, ei, bb->succs)
558 if (e == best_edge
559 || e->dest == EXIT_BLOCK_PTR
560 || e->dest->il.rtl->visited)
561 continue;
563 key = bb_to_key (e->dest);
565 if (bbd[e->dest->index].heap)
567 /* E->DEST is already in some heap. */
568 if (key != bbd[e->dest->index].node->key)
570 if (dump_file)
572 fprintf (dump_file,
573 "Changing key for bb %d from %ld to %ld.\n",
574 e->dest->index,
575 (long) bbd[e->dest->index].node->key,
576 key);
578 fibheap_replace_key (bbd[e->dest->index].heap,
579 bbd[e->dest->index].node, key);
582 else
584 fibheap_t which_heap = *heap;
586 prob = e->probability;
587 freq = EDGE_FREQUENCY (e);
589 if (!(e->flags & EDGE_CAN_FALLTHRU)
590 || (e->flags & EDGE_COMPLEX)
591 || prob < branch_th || freq < exec_th
592 || e->count < count_th)
594 /* When partitioning hot/cold basic blocks, make sure
595 the cold blocks (and only the cold blocks) all get
596 pushed to the last round of trace collection. */
598 if (push_to_next_round_p (e->dest, round,
599 number_of_rounds,
600 exec_th, count_th))
601 which_heap = new_heap;
604 bbd[e->dest->index].heap = which_heap;
605 bbd[e->dest->index].node = fibheap_insert (which_heap,
606 key, e->dest);
608 if (dump_file)
610 fprintf (dump_file,
611 " Possible start of %s round: %d (key: %ld)\n",
612 (which_heap == new_heap) ? "next" : "this",
613 e->dest->index, (long) key);
619 if (best_edge) /* Suitable successor was found. */
621 if (best_edge->dest->il.rtl->visited == *n_traces)
623 /* We do nothing with one basic block loops. */
624 if (best_edge->dest != bb)
626 if (EDGE_FREQUENCY (best_edge)
627 > 4 * best_edge->dest->frequency / 5)
629 /* The loop has at least 4 iterations. If the loop
630 header is not the first block of the function
631 we can rotate the loop. */
633 if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
635 if (dump_file)
637 fprintf (dump_file,
638 "Rotating loop %d - %d\n",
639 best_edge->dest->index, bb->index);
641 bb->aux = best_edge->dest;
642 bbd[best_edge->dest->index].in_trace =
643 (*n_traces) - 1;
644 bb = rotate_loop (best_edge, trace, *n_traces);
647 else
649 /* The loop has less than 4 iterations. */
651 if (single_succ_p (bb)
652 && copy_bb_p (best_edge->dest, !optimize_size))
654 bb = copy_bb (best_edge->dest, best_edge, bb,
655 *n_traces);
656 trace->length++;
661 /* Terminate the trace. */
662 break;
664 else
666 /* Check for a situation
674 where
675 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
676 >= EDGE_FREQUENCY (AC).
677 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
678 Best ordering is then A B C.
680 This situation is created for example by:
682 if (A) B;
683 C; */
687 FOR_EACH_EDGE (e, ei, bb->succs)
688 if (e != best_edge
689 && (e->flags & EDGE_CAN_FALLTHRU)
690 && !(e->flags & EDGE_COMPLEX)
691 && !e->dest->il.rtl->visited
692 && single_pred_p (e->dest)
693 && !(e->flags & EDGE_CROSSING)
694 && single_succ_p (e->dest)
695 && (single_succ_edge (e->dest)->flags
696 & EDGE_CAN_FALLTHRU)
697 && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
698 && single_succ (e->dest) == best_edge->dest
699 && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
701 best_edge = e;
702 if (dump_file)
703 fprintf (dump_file, "Selecting BB %d\n",
704 best_edge->dest->index);
705 break;
708 bb->aux = best_edge->dest;
709 bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
710 bb = best_edge->dest;
714 while (best_edge);
715 trace->last = bb;
716 bbd[trace->first->index].start_of_trace = *n_traces - 1;
717 bbd[trace->last->index].end_of_trace = *n_traces - 1;
719 /* The trace is terminated so we have to recount the keys in heap
720 (some block can have a lower key because now one of its predecessors
721 is an end of the trace). */
722 FOR_EACH_EDGE (e, ei, bb->succs)
724 if (e->dest == EXIT_BLOCK_PTR
725 || e->dest->il.rtl->visited)
726 continue;
728 if (bbd[e->dest->index].heap)
730 key = bb_to_key (e->dest);
731 if (key != bbd[e->dest->index].node->key)
733 if (dump_file)
735 fprintf (dump_file,
736 "Changing key for bb %d from %ld to %ld.\n",
737 e->dest->index,
738 (long) bbd[e->dest->index].node->key, key);
740 fibheap_replace_key (bbd[e->dest->index].heap,
741 bbd[e->dest->index].node,
742 key);
748 fibheap_delete (*heap);
750 /* "Return" the new heap. */
751 *heap = new_heap;
754 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
755 it to the trace after BB, mark the duplicate visited and update the pass'
756 data structures (TRACE is the number of the trace which OLD_BB is duplicated to). */
758 static basic_block
759 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
761 basic_block new_bb;
763 new_bb = duplicate_block (old_bb, e, bb);
764 BB_COPY_PARTITION (new_bb, old_bb);
766 gcc_assert (e->dest == new_bb);
767 gcc_assert (!e->dest->il.rtl->visited);
769 if (dump_file)
770 fprintf (dump_file,
771 "Duplicated bb %d (created bb %d)\n",
772 old_bb->index, new_bb->index);
773 new_bb->il.rtl->visited = trace;
774 new_bb->aux = bb->aux;
775 bb->aux = new_bb;
777 if (new_bb->index >= array_size || last_basic_block > array_size)
779 int i;
780 int new_size;
782 new_size = MAX (last_basic_block, new_bb->index + 1);
783 new_size = GET_ARRAY_SIZE (new_size);
784 bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data));
785 for (i = array_size; i < new_size; i++)
787 bbd[i].start_of_trace = -1;
788 bbd[i].in_trace = -1;
789 bbd[i].end_of_trace = -1;
790 bbd[i].heap = NULL;
791 bbd[i].node = NULL;
793 array_size = new_size;
795 if (dump_file)
797 fprintf (dump_file,
798 "Growing the dynamic array to %d elements.\n",
799 array_size);
803 bbd[new_bb->index].in_trace = trace;
805 return new_bb;
808 /* Compute and return the key (for the heap) of the basic block BB. */
810 static fibheapkey_t
811 bb_to_key (basic_block bb)
813 edge e;
814 edge_iterator ei;
815 int priority = 0;
817 /* Do not start in probably never executed blocks. */
819 if (BB_PARTITION (bb) == BB_COLD_PARTITION
820 || probably_never_executed_bb_p (bb))
821 return BB_FREQ_MAX;
823 /* Prefer blocks whose predecessor is an end of some trace
824 or whose predecessor edge is EDGE_DFS_BACK. */
825 FOR_EACH_EDGE (e, ei, bb->preds)
827 if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
828 || (e->flags & EDGE_DFS_BACK))
830 int edge_freq = EDGE_FREQUENCY (e);
832 if (edge_freq > priority)
833 priority = edge_freq;
837 if (priority)
838 /* The block with priority should have significantly lower key. */
839 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
840 return -bb->frequency;
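/* The keys are negated so that the fibonacci heap, a min-heap, pops the
   most attractive starting block first.  Cold blocks get the large positive
   key BB_FREQ_MAX and are therefore chosen last.  Since block frequencies
   never exceed BB_FREQ_MAX, any block whose predecessor ends a trace (or is
   reached over a DFS back edge) gets a key below -100 * BB_FREQ_MAX
   (provided the corresponding edge has non-zero frequency) and thus always
   beats a block keyed only by its own -frequency.  */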
843 /* Return true when the edge E from basic block BB is better than the temporary
844 best edge (details are in function). The probability of edge E is PROB. The
845 frequency of the successor is FREQ. The current best probability is
846 BEST_PROB, the best frequency is BEST_FREQ.
847 The edge is considered to be equivalent when PROB does not differ much from
848 BEST_PROB; similarly for frequency. */
850 static bool
851 better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob,
852 int best_freq, edge cur_best_edge)
854 bool is_better_edge;
856 /* The BEST_* values do not have to be best, but can be a bit smaller than
857 maximum values. */
858 int diff_prob = best_prob / 10;
859 int diff_freq = best_freq / 10;
861 if (prob > best_prob + diff_prob)
862 /* The edge has higher probability than the temporary best edge. */
863 is_better_edge = true;
864 else if (prob < best_prob - diff_prob)
865 /* The edge has lower probability than the temporary best edge. */
866 is_better_edge = false;
867 else if (freq < best_freq - diff_freq)
868 /* The edge and the temporary best edge have almost equivalent
869 probabilities. The higher frequency of a successor now means
870 that there is another edge going into that successor.
871 This successor has lower frequency so it is better. */
872 is_better_edge = true;
873 else if (freq > best_freq + diff_freq)
874 /* This successor has higher frequency so it is worse. */
875 is_better_edge = false;
876 else if (e->dest->prev_bb == bb)
877 /* The edges have equivalent probabilities and the successors
878 have equivalent frequencies. Select the previous successor. */
879 is_better_edge = true;
880 else
881 is_better_edge = false;
883 /* If we are doing hot/cold partitioning, make sure that we always favor
884 non-crossing edges over crossing edges. */
886 if (!is_better_edge
887 && flag_reorder_blocks_and_partition
888 && cur_best_edge
889 && (cur_best_edge->flags & EDGE_CROSSING)
890 && !(e->flags & EDGE_CROSSING))
891 is_better_edge = true;
893 return is_better_edge;
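/* For example, if the current best edge has probability 5000, diff_prob is
   500, so a competing edge with probability above 5500 wins outright and
   one below 4500 loses outright; anything in between is treated as
   equivalent and the decision falls to the successor frequencies (the less
   frequent successor wins, again with a 10% tolerance), then to keeping the
   existing block order, and finally to preferring non-crossing edges when
   hot/cold partitioning is enabled.  */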
896 /* Connect traces in array TRACES, N_TRACES is the count of traces. */
898 static void
899 connect_traces (int n_traces, struct trace *traces)
901 int i;
902 bool *connected;
903 bool two_passes;
904 int last_trace;
905 int current_pass;
906 int current_partition;
907 int freq_threshold;
908 gcov_type count_threshold;
910 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
911 if (max_entry_count < INT_MAX / 1000)
912 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
913 else
914 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
916 connected = XCNEWVEC (bool, n_traces);
917 last_trace = -1;
918 current_pass = 1;
919 current_partition = BB_PARTITION (traces[0].first);
920 two_passes = false;
922 if (flag_reorder_blocks_and_partition)
923 for (i = 0; i < n_traces && !two_passes; i++)
924 if (BB_PARTITION (traces[0].first)
925 != BB_PARTITION (traces[i].first))
926 two_passes = true;
928 for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++)
930 int t = i;
931 int t2;
932 edge e, best;
933 int best_len;
935 if (i >= n_traces)
937 gcc_assert (two_passes && current_pass == 1);
938 i = 0;
939 t = i;
940 current_pass = 2;
941 if (current_partition == BB_HOT_PARTITION)
942 current_partition = BB_COLD_PARTITION;
943 else
944 current_partition = BB_HOT_PARTITION;
947 if (connected[t])
948 continue;
950 if (two_passes
951 && BB_PARTITION (traces[t].first) != current_partition)
952 continue;
954 connected[t] = true;
956 /* Find the predecessor traces. */
957 for (t2 = t; t2 > 0;)
959 edge_iterator ei;
960 best = NULL;
961 best_len = 0;
962 FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
964 int si = e->src->index;
966 if (e->src != ENTRY_BLOCK_PTR
967 && (e->flags & EDGE_CAN_FALLTHRU)
968 && !(e->flags & EDGE_COMPLEX)
969 && bbd[si].end_of_trace >= 0
970 && !connected[bbd[si].end_of_trace]
971 && (BB_PARTITION (e->src) == current_partition)
972 && (!best
973 || e->probability > best->probability
974 || (e->probability == best->probability
975 && traces[bbd[si].end_of_trace].length > best_len)))
977 best = e;
978 best_len = traces[bbd[si].end_of_trace].length;
981 if (best)
983 best->src->aux = best->dest;
984 t2 = bbd[best->src->index].end_of_trace;
985 connected[t2] = true;
987 if (dump_file)
989 fprintf (dump_file, "Connection: %d %d\n",
990 best->src->index, best->dest->index);
993 else
994 break;
997 if (last_trace >= 0)
998 traces[last_trace].last->aux = traces[t2].first;
999 last_trace = t;
1001 /* Find the successor traces. */
1002 while (1)
1004 /* Find the continuation of the chain. */
1005 edge_iterator ei;
1006 best = NULL;
1007 best_len = 0;
1008 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1010 int di = e->dest->index;
1012 if (e->dest != EXIT_BLOCK_PTR
1013 && (e->flags & EDGE_CAN_FALLTHRU)
1014 && !(e->flags & EDGE_COMPLEX)
1015 && bbd[di].start_of_trace >= 0
1016 && !connected[bbd[di].start_of_trace]
1017 && (BB_PARTITION (e->dest) == current_partition)
1018 && (!best
1019 || e->probability > best->probability
1020 || (e->probability == best->probability
1021 && traces[bbd[di].start_of_trace].length > best_len)))
1023 best = e;
1024 best_len = traces[bbd[di].start_of_trace].length;
1028 if (best)
1030 if (dump_file)
1032 fprintf (dump_file, "Connection: %d %d\n",
1033 best->src->index, best->dest->index);
1035 t = bbd[best->dest->index].start_of_trace;
1036 traces[last_trace].last->aux = traces[t].first;
1037 connected[t] = true;
1038 last_trace = t;
1040 else
1042 /* Try to connect the traces by duplication of 1 block. */
1043 edge e2;
1044 basic_block next_bb = NULL;
1045 bool try_copy = false;
1047 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1048 if (e->dest != EXIT_BLOCK_PTR
1049 && (e->flags & EDGE_CAN_FALLTHRU)
1050 && !(e->flags & EDGE_COMPLEX)
1051 && (!best || e->probability > best->probability))
1053 edge_iterator ei;
1054 edge best2 = NULL;
1055 int best2_len = 0;
1057 /* If the destination is a start of a trace which is only
1058 one block long, then no need to search the successor
1059 blocks of the trace. Accept it. */
1060 if (bbd[e->dest->index].start_of_trace >= 0
1061 && traces[bbd[e->dest->index].start_of_trace].length
1062 == 1)
1064 best = e;
1065 try_copy = true;
1066 continue;
1069 FOR_EACH_EDGE (e2, ei, e->dest->succs)
1071 int di = e2->dest->index;
1073 if (e2->dest == EXIT_BLOCK_PTR
1074 || ((e2->flags & EDGE_CAN_FALLTHRU)
1075 && !(e2->flags & EDGE_COMPLEX)
1076 && bbd[di].start_of_trace >= 0
1077 && !connected[bbd[di].start_of_trace]
1078 && (BB_PARTITION (e2->dest) == current_partition)
1079 && (EDGE_FREQUENCY (e2) >= freq_threshold)
1080 && (e2->count >= count_threshold)
1081 && (!best2
1082 || e2->probability > best2->probability
1083 || (e2->probability == best2->probability
1084 && traces[bbd[di].start_of_trace].length
1085 > best2_len))))
1087 best = e;
1088 best2 = e2;
1089 if (e2->dest != EXIT_BLOCK_PTR)
1090 best2_len = traces[bbd[di].start_of_trace].length;
1091 else
1092 best2_len = INT_MAX;
1093 next_bb = e2->dest;
1094 try_copy = true;
1099 if (flag_reorder_blocks_and_partition)
1100 try_copy = false;
1102 /* Copy tiny blocks always; copy larger blocks only when the
1103 edge is traversed frequently enough. */
1104 if (try_copy
1105 && copy_bb_p (best->dest,
1106 !optimize_size
1107 && EDGE_FREQUENCY (best) >= freq_threshold
1108 && best->count >= count_threshold))
1110 basic_block new_bb;
1112 if (dump_file)
1114 fprintf (dump_file, "Connection: %d %d ",
1115 traces[t].last->index, best->dest->index);
1116 if (!next_bb)
1117 fputc ('\n', dump_file);
1118 else if (next_bb == EXIT_BLOCK_PTR)
1119 fprintf (dump_file, "exit\n");
1120 else
1121 fprintf (dump_file, "%d\n", next_bb->index);
1124 new_bb = copy_bb (best->dest, best, traces[t].last, t);
1125 traces[t].last = new_bb;
1126 if (next_bb && next_bb != EXIT_BLOCK_PTR)
1128 t = bbd[next_bb->index].start_of_trace;
1129 traces[last_trace].last->aux = traces[t].first;
1130 connected[t] = true;
1131 last_trace = t;
1133 else
1134 break; /* Stop finding the successor traces. */
1136 else
1137 break; /* Stop finding the successor traces. */
1142 if (dump_file)
1144 basic_block bb;
1146 fprintf (dump_file, "Final order:\n");
1147 for (bb = traces[0].first; bb; bb = bb->aux)
1148 fprintf (dump_file, "%d ", bb->index);
1149 fprintf (dump_file, "\n");
1150 fflush (dump_file);
1153 FREE (connected);
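/* A small illustration (block numbers are made up): suppose trace 0 ends in
   block 7 and trace 3 starts in block 4.  If there is a fall-through-capable
   edge 7 -> 4 within the current partition, the traces are simply chained by
   pointing 7's aux field at block 4.  If instead the only path is
   7 -> 9 -> 4, block 9 may be copied after block 7 so that the layout still
   falls through into trace 3, but only when the connecting edges are hot
   enough (roughly DUPLICATION_THRESHOLD, i.e. 10% of the hottest entry
   block's frequency and count), block 9 passes copy_bb_p, and hot/cold
   partitioning is not active.  */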
1156 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
1157 when code size is allowed to grow by duplication. */
1159 static bool
1160 copy_bb_p (basic_block bb, int code_may_grow)
1162 int size = 0;
1163 int max_size = uncond_jump_length;
1164 rtx insn;
1166 if (!bb->frequency)
1167 return false;
1168 if (EDGE_COUNT (bb->preds) < 2)
1169 return false;
1170 if (!can_duplicate_block_p (bb))
1171 return false;
1173 /* Avoid duplicating blocks which have many successors (PR/13430). */
1174 if (EDGE_COUNT (bb->succs) > 8)
1175 return false;
1177 if (code_may_grow && maybe_hot_bb_p (bb))
1178 max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
1180 FOR_BB_INSNS (bb, insn)
1182 if (INSN_P (insn))
1183 size += get_attr_min_length (insn);
1186 if (size <= max_size)
1187 return true;
1189 if (dump_file)
1191 fprintf (dump_file,
1192 "Block %d can't be copied because its size = %d.\n",
1193 bb->index, size);
1196 return false;
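/* In other words, a block is duplicated only when the copy is estimated to
   cost no more than the unconditional jump it replaces: the sum of
   get_attr_min_length over its insns must not exceed uncond_jump_length,
   or, when CODE_MAY_GROW is set and the block is hot, that length scaled by
   PARAM_MAX_GROW_COPY_BB_INSNS.  So a one or two insn block is almost
   always worth copying, while larger blocks are copied only on hot paths
   where the growth parameter allows it.  */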
1199 /* Return the length of unconditional jump instruction. */
1201 static int
1202 get_uncond_jump_length (void)
1204 rtx label, jump;
1205 int length;
1207 label = emit_label_before (gen_label_rtx (), get_insns ());
1208 jump = emit_jump_insn (gen_jump (label));
1210 length = get_attr_min_length (jump);
1212 delete_insn (jump);
1213 delete_insn (label);
1214 return length;
1217 /* Find the basic blocks that are rarely executed and need to be moved to
1218 a separate section of the .o file (to cut down on paging and improve
1219 cache locality). */
1221 static void
1222 find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges,
1223 int *n_crossing_edges,
1224 int *max_idx)
1226 basic_block bb;
1227 bool has_hot_blocks = false;
1228 edge e;
1229 int i;
1230 edge_iterator ei;
1232 /* Mark which partition (hot/cold) each basic block belongs in. */
1234 FOR_EACH_BB (bb)
1236 if (probably_never_executed_bb_p (bb))
1237 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1238 else
1240 BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1241 has_hot_blocks = true;
1245 /* Mark every edge that crosses between sections. */
1247 i = 0;
1248 FOR_EACH_BB (bb)
1249 FOR_EACH_EDGE (e, ei, bb->succs)
1251 if (e->src != ENTRY_BLOCK_PTR
1252 && e->dest != EXIT_BLOCK_PTR
1253 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1255 e->flags |= EDGE_CROSSING;
1256 if (i == *max_idx)
1258 *max_idx *= 2;
1259 crossing_edges = xrealloc (crossing_edges,
1260 (*max_idx) * sizeof (edge));
1262 crossing_edges[i++] = e;
1264 else
1265 e->flags &= ~EDGE_CROSSING;
1267 *n_crossing_edges = i;
1270 /* If any destination of a crossing edge does not have a label, add a label;
1271 convert any fall-through crossing edges (for blocks that do not contain
1272 a jump) to unconditional jumps. */
1274 static void
1275 add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
1277 int i;
1278 basic_block src;
1279 basic_block dest;
1280 rtx label;
1281 rtx barrier;
1282 rtx new_jump;
1284 for (i=0; i < n_crossing_edges; i++)
1286 if (crossing_edges[i])
1288 src = crossing_edges[i]->src;
1289 dest = crossing_edges[i]->dest;
1291 /* Make sure dest has a label. */
1293 if (dest && (dest != EXIT_BLOCK_PTR))
1295 label = block_label (dest);
1297 /* Make sure source block ends with a jump. */
1299 if (src && (src != ENTRY_BLOCK_PTR))
1301 if (!JUMP_P (BB_END (src)))
1302 /* bb just falls through. */
1304 /* make sure there's only one successor */
1305 gcc_assert (single_succ_p (src));
1307 /* Find label in dest block. */
1308 label = block_label (dest);
1310 new_jump = emit_jump_insn_after (gen_jump (label),
1311 BB_END (src));
1312 barrier = emit_barrier_after (new_jump);
1313 JUMP_LABEL (new_jump) = label;
1314 LABEL_NUSES (label) += 1;
1315 src->il.rtl->footer = unlink_insn_chain (barrier, barrier);
1316 /* Mark edge as non-fallthru. */
1317 crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
1318 } /* end: 'if (GET_CODE ... ' */
1319 } /* end: 'if (src && src->index...' */
1320 } /* end: 'if (dest && dest->index...' */
1321 } /* end: 'if (crossing_edges[i]...' */
1322 } /* end for loop */
1325 /* Find any bb's where the fall-through edge is a crossing edge (note that
1326 these bb's must also contain a conditional jump; we've already
1327 dealt with fall-through edges for blocks that didn't have a
1328 conditional jump in the call to add_labels_and_missing_jumps).
1329 Convert the fall-through edge to non-crossing edge by inserting a
1330 new bb to fall-through into. The new bb will contain an
1331 unconditional jump (crossing edge) to the original fall through
1332 destination. */
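/* For example (hypothetical blocks): if hot block A ends in a conditional
   jump whose taken edge stays hot while its fall-through edge leads to cold
   block B, the code below first tries to invert the condition so that the
   taken edge becomes the crossing one.  When that is not possible, a new
   block B' is created in the hot section, A falls through into B', and B'
   contains nothing but an unconditional jump to B; only that jump crosses
   the section boundary.  */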
1334 static void
1335 fix_up_fall_thru_edges (void)
1337 basic_block cur_bb;
1338 basic_block new_bb;
1339 edge succ1;
1340 edge succ2;
1341 edge fall_thru;
1342 edge cond_jump = NULL;
1343 edge e;
1344 bool cond_jump_crosses;
1345 int invert_worked;
1346 rtx old_jump;
1347 rtx fall_thru_label;
1348 rtx barrier;
1350 FOR_EACH_BB (cur_bb)
1352 fall_thru = NULL;
1353 if (EDGE_COUNT (cur_bb->succs) > 0)
1354 succ1 = EDGE_SUCC (cur_bb, 0);
1355 else
1356 succ1 = NULL;
1358 if (EDGE_COUNT (cur_bb->succs) > 1)
1359 succ2 = EDGE_SUCC (cur_bb, 1);
1360 else
1361 succ2 = NULL;
1363 /* Find the fall-through edge. */
1365 if (succ1
1366 && (succ1->flags & EDGE_FALLTHRU))
1368 fall_thru = succ1;
1369 cond_jump = succ2;
1371 else if (succ2
1372 && (succ2->flags & EDGE_FALLTHRU))
1374 fall_thru = succ2;
1375 cond_jump = succ1;
1378 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
1380 /* Check to see if the fall-thru edge is a crossing edge. */
1382 if (fall_thru->flags & EDGE_CROSSING)
1384 /* The fall_thru edge crosses; now check the cond jump edge, if
1385 it exists. */
1387 cond_jump_crosses = true;
1388 invert_worked = 0;
1389 old_jump = BB_END (cur_bb);
1391 /* Find the jump instruction, if there is one. */
1393 if (cond_jump)
1395 if (!(cond_jump->flags & EDGE_CROSSING))
1396 cond_jump_crosses = false;
1398 /* We know the fall-thru edge crosses; if the cond
1399 jump edge does NOT cross, and its destination is the
1400 next block in the bb order, invert the jump
1401 (i.e. fix it so the fall thru does not cross and
1402 the cond jump does). */
1404 if (!cond_jump_crosses
1405 && cur_bb->aux == cond_jump->dest)
1407 /* Find label in fall_thru block. We've already added
1408 any missing labels, so there must be one. */
1410 fall_thru_label = block_label (fall_thru->dest);
1412 if (old_jump && fall_thru_label)
1413 invert_worked = invert_jump (old_jump,
1414 fall_thru_label,0);
1415 if (invert_worked)
1417 fall_thru->flags &= ~EDGE_FALLTHRU;
1418 cond_jump->flags |= EDGE_FALLTHRU;
1419 update_br_prob_note (cur_bb);
1420 e = fall_thru;
1421 fall_thru = cond_jump;
1422 cond_jump = e;
1423 cond_jump->flags |= EDGE_CROSSING;
1424 fall_thru->flags &= ~EDGE_CROSSING;
1429 if (cond_jump_crosses || !invert_worked)
1431 /* This is the case where both edges out of the basic
1432 block are crossing edges. Here we will fix up the
1433 fall through edge. The jump edge will be taken care
1434 of later. */
1436 new_bb = force_nonfallthru (fall_thru);
1438 if (new_bb)
1440 new_bb->aux = cur_bb->aux;
1441 cur_bb->aux = new_bb;
1443 /* Make sure new fall-through bb is in same
1444 partition as bb it's falling through from. */
1446 BB_COPY_PARTITION (new_bb, cur_bb);
1447 single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
1450 /* Add barrier after new jump */
1452 if (new_bb)
1454 barrier = emit_barrier_after (BB_END (new_bb));
1455 new_bb->il.rtl->footer = unlink_insn_chain (barrier,
1456 barrier);
1458 else
1460 barrier = emit_barrier_after (BB_END (cur_bb));
1461 cur_bb->il.rtl->footer = unlink_insn_chain (barrier,
1462 barrier);
1470 /* This function checks the destination block of a "crossing jump" to
1471 see if it has any crossing predecessors that begin with a code label
1472 and end with an unconditional jump. If so, it returns that predecessor
1473 block. (This is to avoid creating lots of new basic blocks that all
1474 contain unconditional jumps to the same destination). */
1476 static basic_block
1477 find_jump_block (basic_block jump_dest)
1479 basic_block source_bb = NULL;
1480 edge e;
1481 rtx insn;
1482 edge_iterator ei;
1484 FOR_EACH_EDGE (e, ei, jump_dest->preds)
1485 if (e->flags & EDGE_CROSSING)
1487 basic_block src = e->src;
1489 /* Check each predecessor to see if it has a label, and contains
1490 only one executable instruction, which is an unconditional jump.
1491 If so, we can use it. */
1493 if (LABEL_P (BB_HEAD (src)))
1494 for (insn = BB_HEAD (src);
1495 !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
1496 insn = NEXT_INSN (insn))
1498 if (INSN_P (insn)
1499 && insn == BB_END (src)
1500 && JUMP_P (insn)
1501 && !any_condjump_p (insn))
1503 source_bb = src;
1504 break;
1508 if (source_bb)
1509 break;
1512 return source_bb;
1515 /* Find all BB's with conditional jumps that are crossing edges;
1516 insert a new bb and make the conditional jump branch to the new
1517 bb instead (make the new bb same color so conditional branch won't
1518 be a 'crossing' edge). Insert an unconditional jump from the
1519 new bb to the original destination of the conditional jump. */
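/* For example (hypothetical labels): a hot-section "if (cc) jump .L_cold"
   is retargeted to "if (cc) jump .L_stub", where .L_stub is a new block in
   the hot section whose only contents are a label and an unconditional
   "jump .L_cold".  The conditional branch then stays short-range, and only
   the unconditional jump (which a later step may further convert to an
   indirect jump) crosses the section boundary.  */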
1521 static void
1522 fix_crossing_conditional_branches (void)
1524 basic_block cur_bb;
1525 basic_block new_bb;
1526 basic_block last_bb;
1527 basic_block dest;
1528 basic_block prev_bb;
1529 edge succ1;
1530 edge succ2;
1531 edge crossing_edge;
1532 edge new_edge;
1533 rtx old_jump;
1534 rtx set_src;
1535 rtx old_label = NULL_RTX;
1536 rtx new_label;
1537 rtx new_jump;
1538 rtx barrier;
1540 last_bb = EXIT_BLOCK_PTR->prev_bb;
1542 FOR_EACH_BB (cur_bb)
1544 crossing_edge = NULL;
1545 if (EDGE_COUNT (cur_bb->succs) > 0)
1546 succ1 = EDGE_SUCC (cur_bb, 0);
1547 else
1548 succ1 = NULL;
1550 if (EDGE_COUNT (cur_bb->succs) > 1)
1551 succ2 = EDGE_SUCC (cur_bb, 1);
1552 else
1553 succ2 = NULL;
1555 /* We already took care of fall-through edges, so only one successor
1556 can be a crossing edge. */
1558 if (succ1 && (succ1->flags & EDGE_CROSSING))
1559 crossing_edge = succ1;
1560 else if (succ2 && (succ2->flags & EDGE_CROSSING))
1561 crossing_edge = succ2;
1563 if (crossing_edge)
1565 old_jump = BB_END (cur_bb);
1567 /* Check to make sure the jump instruction is a
1568 conditional jump. */
1570 set_src = NULL_RTX;
1572 if (any_condjump_p (old_jump))
1574 if (GET_CODE (PATTERN (old_jump)) == SET)
1575 set_src = SET_SRC (PATTERN (old_jump));
1576 else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
1578 set_src = XVECEXP (PATTERN (old_jump), 0,0);
1579 if (GET_CODE (set_src) == SET)
1580 set_src = SET_SRC (set_src);
1581 else
1582 set_src = NULL_RTX;
1586 if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
1588 if (GET_CODE (XEXP (set_src, 1)) == PC)
1589 old_label = XEXP (set_src, 2);
1590 else if (GET_CODE (XEXP (set_src, 2)) == PC)
1591 old_label = XEXP (set_src, 1);
1593 /* Check to see if new bb for jumping to that dest has
1594 already been created; if so, use it; if not, create
1595 a new one. */
1597 new_bb = find_jump_block (crossing_edge->dest);
1599 if (new_bb)
1600 new_label = block_label (new_bb);
1601 else
1603 /* Create new basic block to be dest for
1604 conditional jump. */
1606 new_bb = create_basic_block (NULL, NULL, last_bb);
1607 new_bb->aux = last_bb->aux;
1608 last_bb->aux = new_bb;
1609 prev_bb = last_bb;
1610 last_bb = new_bb;
1611 /* Put appropriate instructions in new bb. */
1613 new_label = gen_label_rtx ();
1614 emit_label_before (new_label, BB_HEAD (new_bb));
1615 BB_HEAD (new_bb) = new_label;
1617 if (GET_CODE (old_label) == LABEL_REF)
1619 old_label = JUMP_LABEL (old_jump);
1620 new_jump = emit_jump_insn_after (gen_jump
1621 (old_label),
1622 BB_END (new_bb));
1624 else
1626 gcc_assert (HAVE_return
1627 && GET_CODE (old_label) == RETURN);
1628 new_jump = emit_jump_insn_after (gen_return (),
1629 BB_END (new_bb));
1632 barrier = emit_barrier_after (new_jump);
1633 JUMP_LABEL (new_jump) = old_label;
1634 new_bb->il.rtl->footer = unlink_insn_chain (barrier,
1635 barrier);
1637 /* Make sure new bb is in same partition as source
1638 of conditional branch. */
1639 BB_COPY_PARTITION (new_bb, cur_bb);
1642 /* Make old jump branch to new bb. */
1644 redirect_jump (old_jump, new_label, 0);
1646 /* Remove crossing_edge as predecessor of 'dest'. */
1648 dest = crossing_edge->dest;
1650 redirect_edge_succ (crossing_edge, new_bb);
1652 /* Make a new edge from new_bb to old dest; new edge
1653 will be a successor for new_bb and a predecessor
1654 for 'dest'. */
1656 if (EDGE_COUNT (new_bb->succs) == 0)
1657 new_edge = make_edge (new_bb, dest, 0);
1658 else
1659 new_edge = EDGE_SUCC (new_bb, 0);
1661 crossing_edge->flags &= ~EDGE_CROSSING;
1662 new_edge->flags |= EDGE_CROSSING;
1668 /* Find any unconditional branches that cross between hot and cold
1669 sections. Convert them into indirect jumps instead. */
1671 static void
1672 fix_crossing_unconditional_branches (void)
1674 basic_block cur_bb;
1675 rtx last_insn;
1676 rtx label;
1677 rtx label_addr;
1678 rtx indirect_jump_sequence;
1679 rtx jump_insn = NULL_RTX;
1680 rtx new_reg;
1681 rtx cur_insn;
1682 edge succ;
1684 FOR_EACH_BB (cur_bb)
1686 last_insn = BB_END (cur_bb);
1688 if (EDGE_COUNT (cur_bb->succs) < 1)
1689 continue;
1691 succ = EDGE_SUCC (cur_bb, 0);
1693 /* Check to see if bb ends in a crossing (unconditional) jump. At
1694 this point, no crossing jumps should be conditional. */
1696 if (JUMP_P (last_insn)
1697 && (succ->flags & EDGE_CROSSING))
1699 rtx label2, table;
1701 gcc_assert (!any_condjump_p (last_insn));
1703 /* Make sure the jump is not already an indirect or table jump. */
1705 if (!computed_jump_p (last_insn)
1706 && !tablejump_p (last_insn, &label2, &table))
1708 /* We have found a "crossing" unconditional branch. Now
1709 we must convert it to an indirect jump. First create
1710 reference of label, as target for jump. */
1712 label = JUMP_LABEL (last_insn);
1713 label_addr = gen_rtx_LABEL_REF (Pmode, label);
1714 LABEL_NUSES (label) += 1;
1716 /* Get a register to use for the indirect jump. */
1718 new_reg = gen_reg_rtx (Pmode);
1720 /* Generate the indirect jump sequence. */
1722 start_sequence ();
1723 emit_move_insn (new_reg, label_addr);
1724 emit_indirect_jump (new_reg);
1725 indirect_jump_sequence = get_insns ();
1726 end_sequence ();
1728 /* Make sure every instruction in the new jump sequence has
1729 its basic block set to be cur_bb. */
1731 for (cur_insn = indirect_jump_sequence; cur_insn;
1732 cur_insn = NEXT_INSN (cur_insn))
1734 if (!BARRIER_P (cur_insn))
1735 BLOCK_FOR_INSN (cur_insn) = cur_bb;
1736 if (JUMP_P (cur_insn))
1737 jump_insn = cur_insn;
1740 /* Insert the new (indirect) jump sequence immediately before
1741 the unconditional jump, then delete the unconditional jump. */
1743 emit_insn_before (indirect_jump_sequence, last_insn);
1744 delete_insn (last_insn);
1746 /* Make BB_END for cur_bb be the jump instruction (NOT the
1747 barrier instruction at the end of the sequence...). */
1749 BB_END (cur_bb) = jump_insn;
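/* The effect is that a crossing "jump .L_other_section" is replaced by a
   sketch of the form

     new_reg = &.L_other_section;   load the label's address into a pseudo
     goto *new_reg;                 indirect jump through that pseudo

   which, unlike a direct branch, is not limited in how far it can reach.
   The extra register use is why this pass must run before register
   allocation (see the overview comment later in this file).  */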
1755 /* Add REG_CROSSING_JUMP note to all crossing jump insns. */
1757 static void
1758 add_reg_crossing_jump_notes (void)
1760 basic_block bb;
1761 edge e;
1762 edge_iterator ei;
1764 FOR_EACH_BB (bb)
1765 FOR_EACH_EDGE (e, ei, bb->succs)
1766 if ((e->flags & EDGE_CROSSING)
1767 && JUMP_P (BB_END (e->src)))
1768 REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
1769 NULL_RTX,
1770 REG_NOTES (BB_END
1771 (e->src)));
1774 /* Hot and cold basic blocks are partitioned and put in separate
1775 sections of the .o file, to reduce paging and improve cache
1776 performance (hopefully). This can result in bits of code from the
1777 same function being widely separated in the .o file. However this
1778 is not apparent from the current bb structure. Therefore we must take
1779 care to ensure that: 1). There are no fall_thru edges that cross
1780 between sections; 2). For those architectures which have "short"
1781 conditional branches, all conditional branches that attempt to
1782 cross between sections are converted to unconditional branches;
1783 and, 3). For those architectures which have "short" unconditional
1784 branches, all unconditional branches that attempt to cross between
1785 sections are converted to indirect jumps.
1787 The code for fixing up fall_thru edges that cross between hot and
1788 cold basic blocks does so by creating new basic blocks containing
1789 unconditional branches to the appropriate label in the "other"
1790 section. The new basic block is then put in the same (hot or cold)
1791 section as the original conditional branch, and the fall_thru edge
1792 is modified to fall into the new basic block instead. By adding
1793 this level of indirection we end up with only unconditional branches
1794 crossing between hot and cold sections.
1796 Conditional branches are dealt with by adding a level of indirection.
1797 A new basic block is added in the same (hot/cold) section as the
1798 conditional branch, and the conditional branch is retargeted to the
1799 new basic block. The new basic block contains an unconditional branch
1800 to the original target of the conditional branch (in the other section).
1802 Unconditional branches are dealt with by converting them into
1803 indirect jumps. */
1805 static void
1806 fix_edges_for_rarely_executed_code (edge *crossing_edges,
1807 int n_crossing_edges)
1809 /* Make sure the source of any crossing edge ends in a jump and the
1810 destination of any crossing edge has a label. */
1812 add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
1814 /* Convert all crossing fall_thru edges to non-crossing fall
1815 thrus to unconditional jumps (that jump to the original fall
1816 thru dest). */
1818 fix_up_fall_thru_edges ();
1820 /* If the architecture does not have conditional branches that can
1821 span all of memory, convert crossing conditional branches into
1822 crossing unconditional branches. */
1824 if (!HAS_LONG_COND_BRANCH)
1825 fix_crossing_conditional_branches ();
1827 /* If the architecture does not have unconditional branches that
1828 can span all of memory, convert crossing unconditional branches
1829 into indirect jumps. Since adding an indirect jump also adds
1830 a new register usage, update the register usage information as
1831 well. */
1833 if (!HAS_LONG_UNCOND_BRANCH)
1834 fix_crossing_unconditional_branches ();
1836 add_reg_crossing_jump_notes ();
1839 /* Verify, in the basic block chain, that there is at most one switch
1840 between hot/cold partitions. This is modelled on
1841 rtl_verify_flow_info_1, but it cannot go inside that function
1842 because this condition will not be true until after
1843 reorder_basic_blocks is called. */
1845 static void
1846 verify_hot_cold_block_grouping (void)
1848 basic_block bb;
1849 int err = 0;
1850 bool switched_sections = false;
1851 int current_partition = 0;
1853 FOR_EACH_BB (bb)
1855 if (!current_partition)
1856 current_partition = BB_PARTITION (bb);
1857 if (BB_PARTITION (bb) != current_partition)
1859 if (switched_sections)
1861 error ("multiple hot/cold transitions found (bb %i)",
1862 bb->index);
1863 err = 1;
1865 else
1867 switched_sections = true;
1868 current_partition = BB_PARTITION (bb);
1873 gcc_assert(!err);
1876 /* Reorder basic blocks. The main entry point to this file. */
1879 void
1880 reorder_basic_blocks (void)
1882 int n_traces;
1883 int i;
1884 struct trace *traces;
1886 gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
1888 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
1889 return;
1891 set_edge_can_fallthru_flag ();
1892 mark_dfs_back_edges ();
1894 /* We are estimating the length of uncond jump insn only once since the code
1895 for getting the insn length always returns the minimal length now. */
1896 if (uncond_jump_length == 0)
1897 uncond_jump_length = get_uncond_jump_length ();
1899 /* We need to know some information for each basic block. */
1900 array_size = GET_ARRAY_SIZE (last_basic_block);
1901 bbd = XNEWVEC (bbro_basic_block_data, array_size);
1902 for (i = 0; i < array_size; i++)
1904 bbd[i].start_of_trace = -1;
1905 bbd[i].in_trace = -1;
1906 bbd[i].end_of_trace = -1;
1907 bbd[i].heap = NULL;
1908 bbd[i].node = NULL;
1911 traces = XNEWVEC (struct trace, n_basic_blocks);
1912 n_traces = 0;
1913 find_traces (&n_traces, traces);
1914 connect_traces (n_traces, traces);
1915 FREE (traces);
1916 FREE (bbd);
1918 relink_block_chain (/*stay_in_cfglayout_mode=*/true);
1920 if (dump_file)
1921 dump_flow_info (dump_file, dump_flags);
1923 if (flag_reorder_blocks_and_partition)
1924 verify_hot_cold_block_grouping ();
1927 /* Determine which partition the first basic block in the function
1928 belongs to, then find the first basic block in the current function
1929 that belongs to a different section, and insert a
1930 NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
1931 instruction stream. When writing out the assembly code,
1932 encountering this note will make the compiler switch between the
1933 hot and cold text sections. */
1935 static void
1936 insert_section_boundary_note (void)
1938 basic_block bb;
1939 rtx new_note;
1940 int first_partition = 0;
1942 if (flag_reorder_blocks_and_partition)
1943 FOR_EACH_BB (bb)
1945 if (!first_partition)
1946 first_partition = BB_PARTITION (bb);
1947 if (BB_PARTITION (bb) != first_partition)
1949 new_note = emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS,
1950 BB_HEAD (bb));
1951 break;
1956 /* Duplicate the blocks containing computed gotos. This basically unfactors
1957 computed gotos that were factored early on in the compilation process to
1958 speed up edge based data flow. We used to not unfactor them again, which
1959 can seriously pessimize code with many computed jumps in the source
1960 code, such as interpreters. See e.g. PR15242. */
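/* For example (purely illustrative C, not GCC source), a threaded
   interpreter whose handlers all branch to one factored dispatch block

     dispatch:
       goto *table[*pc++];
     op_add:
       ...
       goto dispatch;
     op_sub:
       ...
       goto dispatch;

   is unfactored by copying the dispatch block into each handler, so that
   every handler ends in its own computed jump instead of sharing one, which
   typically helps indirect-branch prediction in interpreter loops:

     op_add:
       ...
       goto *table[*pc++];  */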
1962 static bool
1963 gate_duplicate_computed_gotos (void)
1965 if (targetm.cannot_modify_jumps_p ())
1966 return false;
1967 return (optimize > 0 && flag_expensive_optimizations && !optimize_size);
1971 static unsigned int
1972 duplicate_computed_gotos (void)
1974 basic_block bb, new_bb;
1975 bitmap candidates;
1976 int max_size;
1978 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
1979 return 0;
1981 cfg_layout_initialize (0);
1983 /* We are estimating the length of uncond jump insn only once
1984 since the code for getting the insn length always returns
1985 the minimal length now. */
1986 if (uncond_jump_length == 0)
1987 uncond_jump_length = get_uncond_jump_length ();
1989 max_size = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
1990 candidates = BITMAP_ALLOC (NULL);
1992 /* Look for blocks that end in a computed jump, and see if such blocks
1993 are suitable for unfactoring. If a block is a candidate for unfactoring,
1994 mark it in the candidates. */
1995 FOR_EACH_BB (bb)
1997 rtx insn;
1998 edge e;
1999 edge_iterator ei;
2000 int size, all_flags;
2002 /* Build the reorder chain for the original order of blocks. */
2003 if (bb->next_bb != EXIT_BLOCK_PTR)
2004 bb->aux = bb->next_bb;
2006 /* Obviously the block has to end in a computed jump. */
2007 if (!computed_jump_p (BB_END (bb)))
2008 continue;
2010 /* Only consider blocks that can be duplicated. */
2011 if (find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX)
2012 || !can_duplicate_block_p (bb))
2013 continue;
2015 /* Make sure that the block is small enough. */
2016 size = 0;
2017 FOR_BB_INSNS (bb, insn)
2018 if (INSN_P (insn))
2020 size += get_attr_min_length (insn);
2021 if (size > max_size)
2022 break;
2024 if (size > max_size)
2025 continue;
2027 /* Final check: there must not be any incoming abnormal edges. */
2028 all_flags = 0;
2029 FOR_EACH_EDGE (e, ei, bb->preds)
2030 all_flags |= e->flags;
2031 if (all_flags & EDGE_COMPLEX)
2032 continue;
2034 bitmap_set_bit (candidates, bb->index);
2037 /* Nothing to do if there is no computed jump here. */
2038 if (bitmap_empty_p (candidates))
2039 goto done;
  /* Duplicate computed gotos.  */
  FOR_EACH_BB (bb)
    {
      if (bb->il.rtl->visited)
        continue;

      bb->il.rtl->visited = 1;

      /* BB must have one outgoing edge.  That edge must not lead to
         the exit block or the next block.
         The destination must have more than one predecessor.  */
      if (!single_succ_p (bb)
          || single_succ (bb) == EXIT_BLOCK_PTR
          || single_succ (bb) == bb->next_bb
          || single_pred_p (single_succ (bb)))
        continue;

      /* The successor block has to be a duplication candidate.  */
      if (!bitmap_bit_p (candidates, single_succ (bb)->index))
        continue;
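      /* Descriptive note: duplicate_block copies the computed-goto block
         and redirects BB's single outgoing edge to the copy; splicing the
         copy into the aux chain right after BB makes it BB's layout
         successor, so the jump from BB into the copy can later be dropped
         in favor of a fallthrough.  */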
      new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
      new_bb->aux = bb->aux;
      bb->aux = new_bb;
      new_bb->il.rtl->visited = 1;
    }

 done:
  cfg_layout_finalize ();

  BITMAP_FREE (candidates);
  return 0;
}
struct tree_opt_pass pass_duplicate_computed_gotos =
{
  "compgotos",                          /* name */
  gate_duplicate_computed_gotos,        /* gate */
  duplicate_computed_gotos,             /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_REORDER_BLOCKS,                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};
/* This function is the main 'entrance' for the optimization that
   partitions hot and cold basic blocks into separate sections of the
   .o file (to improve performance and cache locality).  Ideally it
   would be called after all optimizations that rearrange the CFG have
   been called.  However part of this optimization may introduce new
   register usage, so it must be called before register allocation has
   occurred.  This means that this optimization is actually called
   well before the optimization that reorders basic blocks (see
   function above).

   This optimization checks the feedback information to determine
   which basic blocks are hot/cold, and updates flags on the basic
   blocks to indicate which section they belong in.  This information
   is later used for writing out sections in the .o file.  Because hot
   and cold sections can be arbitrarily large (within the bounds of
   memory), far beyond the size of a single function, it is necessary
   to fix up all edges that cross section boundaries, to make sure the
   instructions used can actually span the required distance.  The
   fixes are described below.

   Fall-through edges must be changed into jumps; it is not safe or
   legal to fall through across a section boundary.  Whenever a
   fall-through edge crossing a section boundary is encountered, a new
   basic block is inserted (in the same section as the fall-through
   source), and the fall-through edge is redirected to the new basic
   block.  The new basic block contains an unconditional jump to the
   original fall-through target.  (If the unconditional jump is
   insufficient to cross section boundaries, that is dealt with a
   little later, see below.)

   In order to deal with architectures that have short conditional
   branches (which cannot span all of memory) we take any conditional
   jump that attempts to cross a section boundary and add a level of
   indirection: it becomes a conditional jump to a new basic block, in
   the same section.  The new basic block contains an unconditional
   jump to the original target, in the other section (see the
   illustrative sketch after this comment).

   For those architectures whose unconditional branch is also
   incapable of reaching all of memory, those unconditional jumps are
   converted into indirect jumps, through a register.

   IMPORTANT NOTE: This optimization causes some messy interactions
   with the cfg cleanup optimizations; those optimizations want to
   merge blocks wherever possible, and to collapse indirect jump
   sequences (change "A jumps to B jumps to C" directly into "A jumps
   to C").  Those optimizations can undo the jump fixes that
   partitioning is required to make (see above) to ensure that jumps
   attempting to cross section boundaries really can cover whatever
   distance the jump requires (on many architectures conditional or
   unconditional jumps are not able to reach all of memory).
   Therefore tests have to be inserted into each such optimization to
   make sure that it does not undo the fixes necessary for crossing
   partition boundaries.  This would be much less of a problem if we
   could perform this optimization later in the compilation, but
   unfortunately the fact that we may need to create indirect jumps
   (through registers) requires that this optimization be performed
   before register allocation.  */
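/* Purely illustrative sketch of the conditional-jump fixup described
   above; the labels and pseudo-assembly are invented for exposition and
   do not correspond to any particular target.  A crossing conditional
   jump in the hot section

       if (cc) jump .L_cold		;; hot section, .L_cold is cold

   is redirected through a stub block in the same (hot) section

       if (cc) jump .L_stub		;; hot section
       ...
     .L_stub:
       jump .L_cold			;; unconditional; on targets whose
					;; unconditional branch cannot span
					;; all of memory this in turn becomes
					;; an indirect jump through a register

   Crossing fall-through edges are handled the same way, after the
   fall-through is first replaced by a jump to such a stub block.  */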
static void
partition_hot_cold_basic_blocks (void)
{
  basic_block cur_bb;
  edge *crossing_edges;
  int n_crossing_edges;
  int max_edges = 2 * last_basic_block;

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return;

  crossing_edges = XCNEWVEC (edge, max_edges);

  cfg_layout_initialize (0);

  FOR_EACH_BB (cur_bb)
    if (cur_bb->index >= NUM_FIXED_BLOCKS
        && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
      cur_bb->aux = cur_bb->next_bb;
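  /* Descriptive note: the aux chain built above records the current block
     order; cfg_layout_finalize at the end of this function consults that
     chain when it lays the function back out.  */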
  find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
                                                        &n_crossing_edges,
                                                        &max_edges);

  if (n_crossing_edges > 0)
    fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);

  free (crossing_edges);

  cfg_layout_finalize ();
}
static bool
gate_handle_reorder_blocks (void)
{
  if (targetm.cannot_modify_jumps_p ())
    return false;
  return (optimize > 0);
}
/* Reorder basic blocks.  */
static unsigned int
rest_of_handle_reorder_blocks (void)
{
  basic_block bb;

  /* Last attempt to optimize the CFG, as scheduling, peepholing and insn
     splitting may have introduced more crossjumping opportunities.  */
  cfg_layout_initialize (CLEANUP_EXPENSIVE);

  if (flag_sched2_use_traces && flag_schedule_insns_after_reload)
    {
      timevar_push (TV_TRACER);
      tracer ();
      timevar_pop (TV_TRACER);
    }

  if (flag_reorder_blocks || flag_reorder_blocks_and_partition)
    reorder_basic_blocks ();
  if (flag_reorder_blocks || flag_reorder_blocks_and_partition
      || (flag_sched2_use_traces && flag_schedule_insns_after_reload))
    cleanup_cfg (CLEANUP_EXPENSIVE);
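  /* Descriptive note: record the final block order (bb->next_bb after
     reordering) in the aux chain; cfg_layout_finalize uses that chain to
     emit the insn stream in this order.  */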
  FOR_EACH_BB (bb)
    if (bb->next_bb != EXIT_BLOCK_PTR)
      bb->aux = bb->next_bb;
  cfg_layout_finalize ();

  /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes.  */
  insert_section_boundary_note ();
  return 0;
}
struct tree_opt_pass pass_reorder_blocks =
{
  "bbro",                               /* name */
  gate_handle_reorder_blocks,           /* gate */
  rest_of_handle_reorder_blocks,        /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_REORDER_BLOCKS,                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  'B'                                   /* letter */
};
static bool
gate_handle_partition_blocks (void)
{
  /* The optimization to partition hot/cold basic blocks into separate
     sections of the .o file does not work well with linkonce or with
     user defined section attributes.  Don't call it if either case
     arises.  */

  return (flag_reorder_blocks_and_partition
          && !DECL_ONE_ONLY (current_function_decl)
          && !user_defined_section_attribute);
}
/* Partition hot and cold basic blocks.  */
static unsigned int
rest_of_handle_partition_blocks (void)
{
  partition_hot_cold_basic_blocks ();
  return 0;
}
struct tree_opt_pass pass_partition_blocks =
{
  "bbpart",                             /* name */
  gate_handle_partition_blocks,         /* gate */
  rest_of_handle_partition_blocks,      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_REORDER_BLOCKS,                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};