gcc/bb-reorder.c
1 /* Basic block reordering routines for the GNU compiler.
2 Copyright (C) 2000, 2002, 2003, 2004 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
14 License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
19 02111-1307, USA. */
/* This (greedy) algorithm constructs traces in several rounds.
   The construction starts from "seeds".  The seed for the first round
   is the entry point of the function.  When there is more than one seed,
   the one with the lowest key in the heap is selected first (see function
   bb_to_key).  Then the algorithm repeatedly adds the most probable
   successor to the end of a trace.  Finally it connects the traces.

   There are two parameters: Branch Threshold and Exec Threshold.
   If the probability of an edge to a successor of the current basic block
   is lower than Branch Threshold, or the frequency of the successor is
   lower than Exec Threshold, the successor will be a seed in one of the
   next rounds.  Each round uses lower thresholds than the previous one.
   The last round has both thresholds set to zero so that the remaining
   blocks are picked up.

   The algorithm selects the most probable successor from all unvisited
   successors and successors that have been added to this trace.
   The other successors (those that have not been "sent" to the next round)
   will be other seeds for this round, and the secondary traces will start
   from them.
   If the successor has not been visited in this trace, it is added to the
   trace (however, there is some heuristic for simple branches).
   If the successor has been visited in this trace, a loop has been found.
   If the loop has many iterations, the loop is rotated so that the source
   block of the most probable edge going out of the loop becomes the last
   block of the trace.
   If the loop has few iterations and there is no edge from the last block
   of the loop going out of the loop, the loop header is duplicated.
   Finally, the construction of the trace is terminated.

   When connecting traces, the pass first checks whether there is an edge
   from the last block of one trace to the first block of another trace.
   When there are still some unconnected traces, it checks whether there
   exists a basic block BB such that BB is a successor of the last block of
   one trace and BB is a predecessor of the first block of another trace.
   In this case, BB is duplicated and the traces are connected through the
   duplicate.
   The remaining traces are simply connected, so there will be a jump to
   the beginning of each remaining trace.

   References:

   "Software Trace Cache"
   A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
   http://citeseer.nj.nec.com/15361.html

*/
68 #include "config.h"
69 #include "system.h"
70 #include "coretypes.h"
71 #include "tm.h"
72 #include "rtl.h"
73 #include "regs.h"
74 #include "flags.h"
75 #include "timevar.h"
76 #include "output.h"
77 #include "cfglayout.h"
78 #include "fibheap.h"
79 #include "target.h"
80 #include "function.h"
81 #include "tm_p.h"
82 #include "obstack.h"
83 #include "expr.h"
/* The number of rounds.  In most cases there will only be 4 rounds, but
   when partitioning hot and cold basic blocks into separate sections of
   the .o file there will be an extra round.  */
88 #define N_ROUNDS 5
/* Stubs in case we don't have a return insn.
   We have to check at run time too, not only at compile time.  */
93 #ifndef HAVE_return
94 #define HAVE_return 0
95 #define gen_return() NULL_RTX
96 #endif
99 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */
100 static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};
102 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */
103 static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
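/* A worked example of how these per-mille thresholds are applied (the numbers
   below merely restate the defaults above; see find_traces and
   find_traces_1_round for the actual computation): in round 0 a successor is
   appended to the current trace only if the edge probability is at least
   400/1000 of REG_BR_PROB_BASE (i.e. 40%) and its frequency is at least
   500/1000 (50%) of the maximal frequency of the function's entry blocks;
   successors failing these tests are kept as seeds for this or a later round
   instead.  The zero entries in the final rounds accept everything that is
   left.  */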
105 /* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
106 block the edge destination is not duplicated while connecting traces. */
107 #define DUPLICATION_THRESHOLD 100
109 /* Length of unconditional jump instruction. */
110 static int uncond_jump_length;
112 /* Structure to hold needed information for each basic block. */
113 typedef struct bbro_basic_block_data_def
115 /* Which trace is the bb start of (-1 means it is not a start of a trace). */
116 int start_of_trace;
118 /* Which trace is the bb end of (-1 means it is not an end of a trace). */
119 int end_of_trace;
121 /* Which heap is BB in (if any)? */
122 fibheap_t heap;
124 /* Which heap node is BB in (if any)? */
125 fibnode_t node;
126 } bbro_basic_block_data;
128 /* The current size of the following dynamic array. */
129 static int array_size;
131 /* The array which holds needed information for basic blocks. */
132 static bbro_basic_block_data *bbd;
/* To avoid frequent reallocation the size of the arrays is greater than
   needed; the number of elements is (not less than) 1.25 * size_wanted.  */
#define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
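/* For example, GET_ARRAY_SIZE (10) == ((10 / 4) + 1) * 5 == 15, i.e. the
   requested 10 elements are rounded up to 15 (at least 1.25 * 10, and a
   multiple of 5), so a few more blocks can be created before the array has
   to be reallocated.  */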
138 /* Free the memory and set the pointer to NULL. */
139 #define FREE(P) (gcc_assert (P), free (P), P = 0)
141 /* Structure for holding information about a trace. */
142 struct trace
144 /* First and last basic block of the trace. */
145 basic_block first, last;
147 /* The round of the STC creation which this trace was found in. */
148 int round;
150 /* The length (i.e. the number of basic blocks) of the trace. */
151 int length;
154 /* Maximum frequency and count of one of the entry blocks. */
155 int max_entry_frequency;
156 gcov_type max_entry_count;
158 /* Local function prototypes. */
159 static void find_traces (int *, struct trace *);
160 static basic_block rotate_loop (edge, struct trace *, int);
161 static void mark_bb_visited (basic_block, int);
162 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
163 int, fibheap_t *, int);
164 static basic_block copy_bb (basic_block, edge, basic_block, int);
165 static fibheapkey_t bb_to_key (basic_block);
166 static bool better_edge_p (basic_block, edge, int, int, int, int, edge);
167 static void connect_traces (int, struct trace *);
168 static bool copy_bb_p (basic_block, int);
169 static int get_uncond_jump_length (void);
170 static bool push_to_next_round_p (basic_block, int, int, int, gcov_type);
171 static void add_unlikely_executed_notes (void);
172 static void find_rarely_executed_basic_blocks_and_crossing_edges (edge *,
173 int *,
174 int *);
175 static void mark_bb_for_unlikely_executed_section (basic_block);
176 static void add_labels_and_missing_jumps (edge *, int);
177 static void add_reg_crossing_jump_notes (void);
178 static void fix_up_fall_thru_edges (void);
179 static void fix_edges_for_rarely_executed_code (edge *, int);
180 static void fix_crossing_conditional_branches (void);
181 static void fix_crossing_unconditional_branches (void);
183 /* Check to see if bb should be pushed into the next round of trace
184 collections or not. Reasons for pushing the block forward are 1).
185 If the block is cold, we are doing partitioning, and there will be
186 another round (cold partition blocks are not supposed to be
187 collected into traces until the very last round); or 2). There will
188 be another round, and the basic block is not "hot enough" for the
189 current round of trace collection. */
191 static bool
192 push_to_next_round_p (basic_block bb, int round, int number_of_rounds,
193 int exec_th, gcov_type count_th)
195 bool there_exists_another_round;
196 bool cold_block;
197 bool block_not_hot_enough;
198 bool next_round_is_last;
200 there_exists_another_round = round < number_of_rounds - 1;
201 next_round_is_last = round + 1 == number_of_rounds - 1;
203 cold_block = (flag_reorder_blocks_and_partition
204 && BB_PARTITION (bb) == BB_COLD_PARTITION);
206 block_not_hot_enough = (bb->frequency < exec_th
207 || bb->count < count_th
208 || probably_never_executed_bb_p (bb));
210 if (flag_reorder_blocks_and_partition
211 && next_round_is_last
212 && BB_PARTITION (bb) != BB_COLD_PARTITION)
213 return false;
214 else if (there_exists_another_round
215 && (cold_block || block_not_hot_enough))
216 return true;
217 else
218 return false;
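/* Note for the partitioning case: once the next round is the last one, the
   first test above returns false for every non-cold block, so hot blocks can
   no longer be deferred and the extra final round ends up collecting only
   the cold partition.  */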
/* Find the traces for Software Trace Cache.  Chain each trace through
   RBI()->next.  Store the number of traces in *N_TRACES and the description
   of each trace in TRACES.  */
225 static void
226 find_traces (int *n_traces, struct trace *traces)
228 int i;
229 int number_of_rounds;
230 edge e;
231 edge_iterator ei;
232 fibheap_t heap;
234 /* Add one extra round of trace collection when partitioning hot/cold
235 basic blocks into separate sections. The last round is for all the
236 cold blocks (and ONLY the cold blocks). */
238 number_of_rounds = N_ROUNDS - 1;
239 if (flag_reorder_blocks_and_partition)
240 number_of_rounds = N_ROUNDS;
242 /* Insert entry points of function into heap. */
243 heap = fibheap_new ();
244 max_entry_frequency = 0;
245 max_entry_count = 0;
246 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
248 bbd[e->dest->index].heap = heap;
249 bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
250 e->dest);
251 if (e->dest->frequency > max_entry_frequency)
252 max_entry_frequency = e->dest->frequency;
253 if (e->dest->count > max_entry_count)
254 max_entry_count = e->dest->count;
257 /* Find the traces. */
258 for (i = 0; i < number_of_rounds; i++)
260 gcov_type count_threshold;
262 if (dump_file)
263 fprintf (dump_file, "STC - round %d\n", i + 1);
265 if (max_entry_count < INT_MAX / 1000)
266 count_threshold = max_entry_count * exec_threshold[i] / 1000;
267 else
268 count_threshold = max_entry_count / 1000 * exec_threshold[i];
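/* Dividing before multiplying in the branch above trades a little precision
   for not overflowing the intermediate product when MAX_ENTRY_COUNT is very
   large; for small counts the exact product is used.  */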
270 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
271 max_entry_frequency * exec_threshold[i] / 1000,
272 count_threshold, traces, n_traces, i, &heap,
273 number_of_rounds);
275 fibheap_delete (heap);
277 if (dump_file)
279 for (i = 0; i < *n_traces; i++)
281 basic_block bb;
282 fprintf (dump_file, "Trace %d (round %d): ", i + 1,
283 traces[i].round + 1);
284 for (bb = traces[i].first; bb != traces[i].last; bb = bb->rbi->next)
285 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
286 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
288 fflush (dump_file);
292 /* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE
293 (with sequential number TRACE_N). */
295 static basic_block
296 rotate_loop (edge back_edge, struct trace *trace, int trace_n)
298 basic_block bb;
300 /* Information about the best end (end after rotation) of the loop. */
301 basic_block best_bb = NULL;
302 edge best_edge = NULL;
303 int best_freq = -1;
304 gcov_type best_count = -1;
305 /* The best edge is preferred when its destination is not visited yet
306 or is a start block of some trace. */
307 bool is_preferred = false;
309 /* Find the most frequent edge that goes out from current trace. */
310 bb = back_edge->dest;
313 edge e;
314 edge_iterator ei;
316 FOR_EACH_EDGE (e, ei, bb->succs)
317 if (e->dest != EXIT_BLOCK_PTR
318 && e->dest->rbi->visited != trace_n
319 && (e->flags & EDGE_CAN_FALLTHRU)
320 && !(e->flags & EDGE_COMPLEX))
322 if (is_preferred)
324 /* The best edge is preferred. */
325 if (!e->dest->rbi->visited
326 || bbd[e->dest->index].start_of_trace >= 0)
328 /* The current edge E is also preferred. */
329 int freq = EDGE_FREQUENCY (e);
330 if (freq > best_freq || e->count > best_count)
332 best_freq = freq;
333 best_count = e->count;
334 best_edge = e;
335 best_bb = bb;
339 else
341 if (!e->dest->rbi->visited
342 || bbd[e->dest->index].start_of_trace >= 0)
344 /* The current edge E is preferred. */
345 is_preferred = true;
346 best_freq = EDGE_FREQUENCY (e);
347 best_count = e->count;
348 best_edge = e;
349 best_bb = bb;
351 else
353 int freq = EDGE_FREQUENCY (e);
354 if (!best_edge || freq > best_freq || e->count > best_count)
356 best_freq = freq;
357 best_count = e->count;
358 best_edge = e;
359 best_bb = bb;
364 bb = bb->rbi->next;
366 while (bb != back_edge->dest);
368 if (best_bb)
370 /* Rotate the loop so that the BEST_EDGE goes out from the last block of
371 the trace. */
372 if (back_edge->dest == trace->first)
374 trace->first = best_bb->rbi->next;
376 else
378 basic_block prev_bb;
380 for (prev_bb = trace->first;
381 prev_bb->rbi->next != back_edge->dest;
382 prev_bb = prev_bb->rbi->next)
384 prev_bb->rbi->next = best_bb->rbi->next;
386 /* Try to get rid of uncond jump to cond jump. */
387 if (EDGE_COUNT (prev_bb->succs) == 1)
389 basic_block header = EDGE_SUCC (prev_bb, 0)->dest;
391 /* Duplicate HEADER if it is a small block containing cond jump
392 in the end. */
393 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
394 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
395 NULL_RTX))
397 copy_bb (header, EDGE_SUCC (prev_bb, 0), prev_bb, trace_n);
402 else
404 /* We have not found suitable loop tail so do no rotation. */
405 best_bb = back_edge->src;
407 best_bb->rbi->next = NULL;
408 return best_bb;
411 /* This function marks BB that it was visited in trace number TRACE. */
413 static void
414 mark_bb_visited (basic_block bb, int trace)
416 bb->rbi->visited = trace;
417 if (bbd[bb->index].heap)
419 fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
420 bbd[bb->index].heap = NULL;
421 bbd[bb->index].node = NULL;
/* One round of finding traces.  Find traces for BRANCH_TH and EXEC_TH, i.e.
   do not include into traces basic blocks whose probability is lower than
   BRANCH_TH, whose frequency is lower than EXEC_TH, or whose count is lower
   than COUNT_TH.  Store the new traces into TRACES and modify the number of
   traces *N_TRACES.  Set the round (which the trace belongs to) to ROUND.
   The function expects starting basic blocks to be in *HEAP; at the end it
   deletes *HEAP and stores the starting points for the next round into a
   new *HEAP.  */
433 static void
434 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
435 struct trace *traces, int *n_traces, int round,
436 fibheap_t *heap, int number_of_rounds)
438 /* The following variable refers to the last round in which non-"cold"
439 blocks may be collected into a trace. */
441 int last_round = N_ROUNDS - 1;
443 /* Heap for discarded basic blocks which are possible starting points for
444 the next round. */
445 fibheap_t new_heap = fibheap_new ();
447 while (!fibheap_empty (*heap))
449 basic_block bb;
450 struct trace *trace;
451 edge best_edge, e;
452 fibheapkey_t key;
453 edge_iterator ei;
455 bb = fibheap_extract_min (*heap);
456 bbd[bb->index].heap = NULL;
457 bbd[bb->index].node = NULL;
459 if (dump_file)
460 fprintf (dump_file, "Getting bb %d\n", bb->index);
462 /* If the BB's frequency is too low send BB to the next round. When
463 partitioning hot/cold blocks into separate sections, make sure all
464 the cold blocks (and ONLY the cold blocks) go into the (extra) final
465 round. */
467 if (push_to_next_round_p (bb, round, number_of_rounds, exec_th,
468 count_th))
470 int key = bb_to_key (bb);
471 bbd[bb->index].heap = new_heap;
472 bbd[bb->index].node = fibheap_insert (new_heap, key, bb);
474 if (dump_file)
475 fprintf (dump_file,
476 " Possible start point of next round: %d (key: %d)\n",
477 bb->index, key);
478 continue;
481 trace = traces + *n_traces;
482 trace->first = bb;
483 trace->round = round;
484 trace->length = 0;
485 (*n_traces)++;
489 int prob, freq;
491 /* The probability and frequency of the best edge. */
492 int best_prob = INT_MIN / 2;
493 int best_freq = INT_MIN / 2;
495 best_edge = NULL;
496 mark_bb_visited (bb, *n_traces);
497 trace->length++;
499 if (dump_file)
500 fprintf (dump_file, "Basic block %d was visited in trace %d\n",
501 bb->index, *n_traces - 1);
503 /* Select the successor that will be placed after BB. */
504 FOR_EACH_EDGE (e, ei, bb->succs)
506 gcc_assert (!(e->flags & EDGE_FAKE));
508 if (e->dest == EXIT_BLOCK_PTR)
509 continue;
511 if (e->dest->rbi->visited
512 && e->dest->rbi->visited != *n_traces)
513 continue;
515 if (BB_PARTITION (e->dest) == BB_COLD_PARTITION
516 && round < last_round)
517 continue;
519 prob = e->probability;
520 freq = EDGE_FREQUENCY (e);
522 /* Edge that cannot be fallthru or improbable or infrequent
523 successor (i.e. it is unsuitable successor). */
524 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
525 || prob < branch_th || freq < exec_th || e->count < count_th)
526 continue;
528 /* If partitioning hot/cold basic blocks, don't consider edges
529 that cross section boundaries. */
531 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
532 best_edge))
534 best_edge = e;
535 best_prob = prob;
536 best_freq = freq;
540 /* If the best destination has multiple predecessors, and can be
541 duplicated cheaper than a jump, don't allow it to be added
542 to a trace. We'll duplicate it when connecting traces. */
543 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
544 && copy_bb_p (best_edge->dest, 0))
545 best_edge = NULL;
547 /* Add all non-selected successors to the heaps. */
548 FOR_EACH_EDGE (e, ei, bb->succs)
550 if (e == best_edge
551 || e->dest == EXIT_BLOCK_PTR
552 || e->dest->rbi->visited)
553 continue;
555 key = bb_to_key (e->dest);
557 if (bbd[e->dest->index].heap)
559 /* E->DEST is already in some heap. */
560 if (key != bbd[e->dest->index].node->key)
562 if (dump_file)
564 fprintf (dump_file,
565 "Changing key for bb %d from %ld to %ld.\n",
566 e->dest->index,
567 (long) bbd[e->dest->index].node->key,
568 key);
570 fibheap_replace_key (bbd[e->dest->index].heap,
571 bbd[e->dest->index].node, key);
574 else
576 fibheap_t which_heap = *heap;
578 prob = e->probability;
579 freq = EDGE_FREQUENCY (e);
581 if (!(e->flags & EDGE_CAN_FALLTHRU)
582 || (e->flags & EDGE_COMPLEX)
583 || prob < branch_th || freq < exec_th
584 || e->count < count_th)
586 /* When partitioning hot/cold basic blocks, make sure
587 the cold blocks (and only the cold blocks) all get
588 pushed to the last round of trace collection. */
590 if (push_to_next_round_p (e->dest, round,
591 number_of_rounds,
592 exec_th, count_th))
593 which_heap = new_heap;
596 bbd[e->dest->index].heap = which_heap;
597 bbd[e->dest->index].node = fibheap_insert (which_heap,
598 key, e->dest);
600 if (dump_file)
602 fprintf (dump_file,
603 " Possible start of %s round: %d (key: %ld)\n",
604 (which_heap == new_heap) ? "next" : "this",
605 e->dest->index, (long) key);
611 if (best_edge) /* Suitable successor was found. */
613 if (best_edge->dest->rbi->visited == *n_traces)
615 /* We do nothing with one basic block loops. */
616 if (best_edge->dest != bb)
618 if (EDGE_FREQUENCY (best_edge)
619 > 4 * best_edge->dest->frequency / 5)
/* The loop has at least 4 iterations.  If the loop
   header is not the first block of the function
   we can rotate the loop.  */
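/* (EDGE_FREQUENCY (best_edge) exceeding 4/5 of the header's frequency
   means the back edge accounts for more than 80% of the header's
   executions, so on average the loop body runs roughly five or more
   times per entry, which is where the "at least 4 iterations" above
   comes from.)  */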
625 if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
627 if (dump_file)
629 fprintf (dump_file,
630 "Rotating loop %d - %d\n",
631 best_edge->dest->index, bb->index);
633 bb->rbi->next = best_edge->dest;
634 bb = rotate_loop (best_edge, trace, *n_traces);
637 else
639 /* The loop has less than 4 iterations. */
641 if (EDGE_COUNT (bb->succs) == 1
642 && copy_bb_p (best_edge->dest, !optimize_size))
644 bb = copy_bb (best_edge->dest, best_edge, bb,
645 *n_traces);
650 /* Terminate the trace. */
651 break;
653 else
/* Check for a situation

      A
     /|
    B |
     \|
      C

   where
   EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
     >= EDGE_FREQUENCY (AC).
   (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
   Best ordering is then A B C.

   This situation is created for example by:

   if (A) B;
   C;

   */
676 FOR_EACH_EDGE (e, ei, bb->succs)
677 if (e != best_edge
678 && (e->flags & EDGE_CAN_FALLTHRU)
679 && !(e->flags & EDGE_COMPLEX)
680 && !e->dest->rbi->visited
681 && EDGE_COUNT (e->dest->preds) == 1
682 && !(e->flags & EDGE_CROSSING)
683 && EDGE_COUNT (e->dest->succs) == 1
684 && (EDGE_SUCC (e->dest, 0)->flags & EDGE_CAN_FALLTHRU)
685 && !(EDGE_SUCC (e->dest, 0)->flags & EDGE_COMPLEX)
686 && EDGE_SUCC (e->dest, 0)->dest == best_edge->dest
687 && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge))
689 best_edge = e;
690 if (dump_file)
691 fprintf (dump_file, "Selecting BB %d\n",
692 best_edge->dest->index);
693 break;
696 bb->rbi->next = best_edge->dest;
697 bb = best_edge->dest;
701 while (best_edge);
702 trace->last = bb;
703 bbd[trace->first->index].start_of_trace = *n_traces - 1;
704 bbd[trace->last->index].end_of_trace = *n_traces - 1;
706 /* The trace is terminated so we have to recount the keys in heap
707 (some block can have a lower key because now one of its predecessors
708 is an end of the trace). */
709 FOR_EACH_EDGE (e, ei, bb->succs)
711 if (e->dest == EXIT_BLOCK_PTR
712 || e->dest->rbi->visited)
713 continue;
715 if (bbd[e->dest->index].heap)
717 key = bb_to_key (e->dest);
718 if (key != bbd[e->dest->index].node->key)
720 if (dump_file)
722 fprintf (dump_file,
723 "Changing key for bb %d from %ld to %ld.\n",
724 e->dest->index,
725 (long) bbd[e->dest->index].node->key, key);
727 fibheap_replace_key (bbd[e->dest->index].heap,
728 bbd[e->dest->index].node,
729 key);
735 fibheap_delete (*heap);
737 /* "Return" the new heap. */
738 *heap = new_heap;
/* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
   it to the trace after BB, mark OLD_BB visited and update the pass's data
   structures (TRACE is the number of the trace which OLD_BB is duplicated
   to).  */
745 static basic_block
746 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
748 basic_block new_bb;
750 new_bb = duplicate_block (old_bb, e);
751 BB_COPY_PARTITION (new_bb, old_bb);
753 gcc_assert (e->dest == new_bb);
754 gcc_assert (!e->dest->rbi->visited);
756 if (dump_file)
757 fprintf (dump_file,
758 "Duplicated bb %d (created bb %d)\n",
759 old_bb->index, new_bb->index);
760 new_bb->rbi->visited = trace;
761 new_bb->rbi->next = bb->rbi->next;
762 bb->rbi->next = new_bb;
764 if (new_bb->index >= array_size || last_basic_block > array_size)
766 int i;
767 int new_size;
769 new_size = MAX (last_basic_block, new_bb->index + 1);
770 new_size = GET_ARRAY_SIZE (new_size);
771 bbd = xrealloc (bbd, new_size * sizeof (bbro_basic_block_data));
772 for (i = array_size; i < new_size; i++)
774 bbd[i].start_of_trace = -1;
775 bbd[i].end_of_trace = -1;
776 bbd[i].heap = NULL;
777 bbd[i].node = NULL;
779 array_size = new_size;
781 if (dump_file)
783 fprintf (dump_file,
784 "Growing the dynamic array to %d elements.\n",
785 array_size);
789 return new_bb;
792 /* Compute and return the key (for the heap) of the basic block BB. */
794 static fibheapkey_t
795 bb_to_key (basic_block bb)
797 edge e;
798 edge_iterator ei;
799 int priority = 0;
801 /* Do not start in probably never executed blocks. */
803 if (BB_PARTITION (bb) == BB_COLD_PARTITION
804 || probably_never_executed_bb_p (bb))
805 return BB_FREQ_MAX;
807 /* Prefer blocks whose predecessor is an end of some trace
808 or whose predecessor edge is EDGE_DFS_BACK. */
809 FOR_EACH_EDGE (e, ei, bb->preds)
811 if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
812 || (e->flags & EDGE_DFS_BACK))
814 int edge_freq = EDGE_FREQUENCY (e);
816 if (edge_freq > priority)
817 priority = edge_freq;
821 if (priority)
822 /* The block with priority should have significantly lower key. */
823 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);
824 return -bb->frequency;
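/* Note: find_traces and find_traces_1_round extract blocks with
   fibheap_extract_min, so the most negative key is taken first.  A block
   whose predecessor ends a trace, or whose predecessor edge is
   EDGE_DFS_BACK, therefore gets a key below -100 * BB_FREQ_MAX and is always
   preferred over blocks keyed only by their own frequency, while cold or
   probably-never-executed blocks get the large positive key BB_FREQ_MAX and
   are taken last.  */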
827 /* Return true when the edge E from basic block BB is better than the temporary
828 best edge (details are in function). The probability of edge E is PROB. The
829 frequency of the successor is FREQ. The current best probability is
830 BEST_PROB, the best frequency is BEST_FREQ.
831 The edge is considered to be equivalent when PROB does not differ much from
832 BEST_PROB; similarly for frequency. */
834 static bool
835 better_edge_p (basic_block bb, edge e, int prob, int freq, int best_prob,
836 int best_freq, edge cur_best_edge)
838 bool is_better_edge;
840 /* The BEST_* values do not have to be best, but can be a bit smaller than
841 maximum values. */
842 int diff_prob = best_prob / 10;
843 int diff_freq = best_freq / 10;
845 if (prob > best_prob + diff_prob)
846 /* The edge has higher probability than the temporary best edge. */
847 is_better_edge = true;
848 else if (prob < best_prob - diff_prob)
849 /* The edge has lower probability than the temporary best edge. */
850 is_better_edge = false;
851 else if (freq < best_freq - diff_freq)
852 /* The edge and the temporary best edge have almost equivalent
853 probabilities. The higher frequency of a successor now means
854 that there is another edge going into that successor.
855 This successor has lower frequency so it is better. */
856 is_better_edge = true;
857 else if (freq > best_freq + diff_freq)
858 /* This successor has higher frequency so it is worse. */
859 is_better_edge = false;
860 else if (e->dest->prev_bb == bb)
861 /* The edges have equivalent probabilities and the successors
862 have equivalent frequencies. Select the previous successor. */
863 is_better_edge = true;
864 else
865 is_better_edge = false;
867 /* If we are doing hot/cold partitioning, make sure that we always favor
868 non-crossing edges over crossing edges. */
870 if (!is_better_edge
871 && flag_reorder_blocks_and_partition
872 && cur_best_edge
873 && (cur_best_edge->flags & EDGE_CROSSING)
874 && !(e->flags & EDGE_CROSSING))
875 is_better_edge = true;
877 return is_better_edge;
880 /* Connect traces in array TRACES, N_TRACES is the count of traces. */
882 static void
883 connect_traces (int n_traces, struct trace *traces)
885 int i;
886 int unconnected_hot_trace_count = 0;
887 bool cold_connected = true;
888 bool *connected;
889 bool *cold_traces;
890 int last_trace;
891 int freq_threshold;
892 gcov_type count_threshold;
894 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
895 if (max_entry_count < INT_MAX / 1000)
896 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
897 else
898 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;
900 connected = xcalloc (n_traces, sizeof (bool));
901 last_trace = -1;
903 /* If we are partitioning hot/cold basic blocks, mark the cold
904 traces as already connected, to remove them from consideration
905 for connection to the hot traces. After the hot traces have all
906 been connected (determined by "unconnected_hot_trace_count"), we
907 will go back and connect the cold traces. */
909 cold_traces = xcalloc (n_traces, sizeof (bool));
911 if (flag_reorder_blocks_and_partition)
912 for (i = 0; i < n_traces; i++)
914 if (BB_PARTITION (traces[i].first) == BB_COLD_PARTITION)
916 connected[i] = true;
917 cold_traces[i] = true;
918 cold_connected = false;
920 else
921 unconnected_hot_trace_count++;
924 for (i = 0; i < n_traces || !cold_connected ; i++)
926 int t = i;
927 int t2;
928 edge e, best;
929 int best_len;
931 /* If we are partitioning hot/cold basic blocks, check to see
932 if all the hot traces have been connected. If so, go back
933 and mark the cold traces as unconnected so we can connect
934 them up too. Re-set "i" to the first (unconnected) cold
935 trace. Use flag "cold_connected" to make sure we don't do
936 this step more than once. */
938 if (flag_reorder_blocks_and_partition
939 && (i >= n_traces || unconnected_hot_trace_count <= 0)
940 && !cold_connected)
942 int j;
943 int first_cold_trace = -1;
945 for (j = 0; j < n_traces; j++)
946 if (cold_traces[j])
948 connected[j] = false;
949 if (first_cold_trace == -1)
950 first_cold_trace = j;
952 i = t = first_cold_trace;
953 cold_connected = true;
956 if (connected[t])
957 continue;
959 connected[t] = true;
960 if (unconnected_hot_trace_count > 0)
961 unconnected_hot_trace_count--;
963 /* Find the predecessor traces. */
964 for (t2 = t; t2 > 0;)
966 edge_iterator ei;
967 best = NULL;
968 best_len = 0;
969 FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
971 int si = e->src->index;
973 if (e->src != ENTRY_BLOCK_PTR
974 && (e->flags & EDGE_CAN_FALLTHRU)
975 && !(e->flags & EDGE_COMPLEX)
976 && bbd[si].end_of_trace >= 0
977 && !connected[bbd[si].end_of_trace]
978 && (!best
979 || e->probability > best->probability
980 || (e->probability == best->probability
981 && traces[bbd[si].end_of_trace].length > best_len)))
983 best = e;
984 best_len = traces[bbd[si].end_of_trace].length;
987 if (best)
989 best->src->rbi->next = best->dest;
990 t2 = bbd[best->src->index].end_of_trace;
991 connected[t2] = true;
993 if (unconnected_hot_trace_count > 0)
994 unconnected_hot_trace_count--;
996 if (dump_file)
998 fprintf (dump_file, "Connection: %d %d\n",
999 best->src->index, best->dest->index);
1002 else
1003 break;
1006 if (last_trace >= 0)
1007 traces[last_trace].last->rbi->next = traces[t2].first;
1008 last_trace = t;
1010 /* Find the successor traces. */
1011 while (1)
1013 /* Find the continuation of the chain. */
1014 edge_iterator ei;
1015 best = NULL;
1016 best_len = 0;
1017 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1019 int di = e->dest->index;
1021 if (e->dest != EXIT_BLOCK_PTR
1022 && (e->flags & EDGE_CAN_FALLTHRU)
1023 && !(e->flags & EDGE_COMPLEX)
1024 && bbd[di].start_of_trace >= 0
1025 && !connected[bbd[di].start_of_trace]
1026 && (!best
1027 || e->probability > best->probability
1028 || (e->probability == best->probability
1029 && traces[bbd[di].start_of_trace].length > best_len)))
1031 best = e;
1032 best_len = traces[bbd[di].start_of_trace].length;
1036 if (best)
1038 if (dump_file)
1040 fprintf (dump_file, "Connection: %d %d\n",
1041 best->src->index, best->dest->index);
1043 t = bbd[best->dest->index].start_of_trace;
1044 traces[last_trace].last->rbi->next = traces[t].first;
1045 connected[t] = true;
1046 if (unconnected_hot_trace_count > 0)
1047 unconnected_hot_trace_count--;
1048 last_trace = t;
1050 else
1052 /* Try to connect the traces by duplication of 1 block. */
1053 edge e2;
1054 basic_block next_bb = NULL;
1055 bool try_copy = false;
1057 FOR_EACH_EDGE (e, ei, traces[t].last->succs)
1058 if (e->dest != EXIT_BLOCK_PTR
1059 && (e->flags & EDGE_CAN_FALLTHRU)
1060 && !(e->flags & EDGE_COMPLEX)
1061 && (!best || e->probability > best->probability))
1063 edge_iterator ei;
1064 edge best2 = NULL;
1065 int best2_len = 0;
1067 /* If the destination is a start of a trace which is only
1068 one block long, then no need to search the successor
1069 blocks of the trace. Accept it. */
1070 if (bbd[e->dest->index].start_of_trace >= 0
1071 && traces[bbd[e->dest->index].start_of_trace].length
1072 == 1)
1074 best = e;
1075 try_copy = true;
1076 continue;
1079 FOR_EACH_EDGE (e2, ei, e->dest->succs)
1081 int di = e2->dest->index;
1083 if (e2->dest == EXIT_BLOCK_PTR
1084 || ((e2->flags & EDGE_CAN_FALLTHRU)
1085 && !(e2->flags & EDGE_COMPLEX)
1086 && bbd[di].start_of_trace >= 0
1087 && !connected[bbd[di].start_of_trace]
1088 && (EDGE_FREQUENCY (e2) >= freq_threshold)
1089 && (e2->count >= count_threshold)
1090 && (!best2
1091 || e2->probability > best2->probability
1092 || (e2->probability == best2->probability
1093 && traces[bbd[di].start_of_trace].length
1094 > best2_len))))
1096 best = e;
1097 best2 = e2;
1098 if (e2->dest != EXIT_BLOCK_PTR)
1099 best2_len = traces[bbd[di].start_of_trace].length;
1100 else
1101 best2_len = INT_MAX;
1102 next_bb = e2->dest;
1103 try_copy = true;
1108 if (flag_reorder_blocks_and_partition)
1109 try_copy = false;
1111 /* Copy tiny blocks always; copy larger blocks only when the
1112 edge is traversed frequently enough. */
1113 if (try_copy
1114 && copy_bb_p (best->dest,
1115 !optimize_size
1116 && EDGE_FREQUENCY (best) >= freq_threshold
1117 && best->count >= count_threshold))
1119 basic_block new_bb;
1121 if (dump_file)
1123 fprintf (dump_file, "Connection: %d %d ",
1124 traces[t].last->index, best->dest->index);
1125 if (!next_bb)
1126 fputc ('\n', dump_file);
1127 else if (next_bb == EXIT_BLOCK_PTR)
1128 fprintf (dump_file, "exit\n");
1129 else
1130 fprintf (dump_file, "%d\n", next_bb->index);
1133 new_bb = copy_bb (best->dest, best, traces[t].last, t);
1134 traces[t].last = new_bb;
1135 if (next_bb && next_bb != EXIT_BLOCK_PTR)
1137 t = bbd[next_bb->index].start_of_trace;
1138 traces[last_trace].last->rbi->next = traces[t].first;
1139 connected[t] = true;
1140 if (unconnected_hot_trace_count > 0)
1141 unconnected_hot_trace_count--;
1142 last_trace = t;
1144 else
1145 break; /* Stop finding the successor traces. */
1147 else
1148 break; /* Stop finding the successor traces. */
1153 if (dump_file)
1155 basic_block bb;
1157 fprintf (dump_file, "Final order:\n");
1158 for (bb = traces[0].first; bb; bb = bb->rbi->next)
1159 fprintf (dump_file, "%d ", bb->index);
1160 fprintf (dump_file, "\n");
1161 fflush (dump_file);
1164 FREE (connected);
1165 FREE (cold_traces);
1168 /* Return true when BB can and should be copied. CODE_MAY_GROW is true
1169 when code size is allowed to grow by duplication. */
1171 static bool
1172 copy_bb_p (basic_block bb, int code_may_grow)
1174 int size = 0;
1175 int max_size = uncond_jump_length;
1176 rtx insn;
1178 if (!bb->frequency)
1179 return false;
1180 if (EDGE_COUNT (bb->preds) < 2)
1181 return false;
1182 if (!can_duplicate_block_p (bb))
1183 return false;
1185 /* Avoid duplicating blocks which have many successors (PR/13430). */
1186 if (EDGE_COUNT (bb->succs) > 8)
1187 return false;
1189 if (code_may_grow && maybe_hot_bb_p (bb))
1190 max_size *= 8;
1192 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb));
1193 insn = NEXT_INSN (insn))
1195 if (INSN_P (insn))
1196 size += get_attr_length (insn);
1199 if (size <= max_size)
1200 return true;
1202 if (dump_file)
1204 fprintf (dump_file,
1205 "Block %d can't be copied because its size = %d.\n",
1206 bb->index, size);
1209 return false;
1212 /* Return the length of unconditional jump instruction. */
1214 static int
1215 get_uncond_jump_length (void)
1217 rtx label, jump;
1218 int length;
1220 label = emit_label_before (gen_label_rtx (), get_insns ());
1221 jump = emit_jump_insn (gen_jump (label));
1223 length = get_attr_length (jump);
1225 delete_insn (jump);
1226 delete_insn (label);
1227 return length;
1230 static void
1231 add_unlikely_executed_notes (void)
1233 basic_block bb;
1235 /* Add the UNLIKELY_EXECUTED_NOTES to each cold basic block. */
1237 FOR_EACH_BB (bb)
1238 if (BB_PARTITION (bb) == BB_COLD_PARTITION)
1239 mark_bb_for_unlikely_executed_section (bb);
1242 /* Find the basic blocks that are rarely executed and need to be moved to
1243 a separate section of the .o file (to cut down on paging and improve
1244 cache locality). */
1246 static void
1247 find_rarely_executed_basic_blocks_and_crossing_edges (edge *crossing_edges,
1248 int *n_crossing_edges,
1249 int *max_idx)
1251 basic_block bb;
1252 bool has_hot_blocks = false;
1253 edge e;
1254 int i;
1255 edge_iterator ei;
1257 /* Mark which partition (hot/cold) each basic block belongs in. */
1259 FOR_EACH_BB (bb)
1261 if (probably_never_executed_bb_p (bb))
1262 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
1263 else
1265 BB_SET_PARTITION (bb, BB_HOT_PARTITION);
1266 has_hot_blocks = true;
1270 /* Since all "hot" basic blocks will eventually be scheduled before all
1271 cold basic blocks, make *sure* the real function entry block is in
1272 the hot partition (if there is one). */
1274 if (has_hot_blocks)
1275 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
1276 if (e->dest->index >= 0)
1278 BB_SET_PARTITION (e->dest, BB_HOT_PARTITION);
1279 break;
1282 /* Mark every edge that crosses between sections. */
1284 i = 0;
1285 if (targetm.have_named_sections)
1287 FOR_EACH_BB (bb)
1288 FOR_EACH_EDGE (e, ei, bb->succs)
1290 if (e->src != ENTRY_BLOCK_PTR
1291 && e->dest != EXIT_BLOCK_PTR
1292 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1294 e->flags |= EDGE_CROSSING;
1295 if (i == *max_idx)
1297 *max_idx *= 2;
1298 crossing_edges = xrealloc (crossing_edges,
1299 (*max_idx) * sizeof (edge));
1301 crossing_edges[i++] = e;
1303 else
1304 e->flags &= ~EDGE_CROSSING;
1307 *n_crossing_edges = i;
1310 /* Add NOTE_INSN_UNLIKELY_EXECUTED_CODE to top of basic block. This note
1311 is later used to mark the basic block to be put in the
1312 unlikely-to-be-executed section of the .o file. */
1314 static void
1315 mark_bb_for_unlikely_executed_section (basic_block bb)
1317 rtx cur_insn;
1318 rtx insert_insn = NULL;
1319 rtx new_note;
1321 /* Insert new NOTE immediately after BASIC_BLOCK note. */
1323 for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb));
1324 cur_insn = NEXT_INSN (cur_insn))
1325 if (GET_CODE (cur_insn) == NOTE
1326 && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK)
1328 insert_insn = cur_insn;
1329 break;
1332 /* If basic block does not contain a NOTE_INSN_BASIC_BLOCK, there is
1333 a major problem. */
1334 gcc_assert (insert_insn);
1336 /* Insert note and assign basic block number to it. */
1338 new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
1339 insert_insn);
1340 NOTE_BASIC_BLOCK (new_note) = bb;
/* If any destination of a crossing edge does not have a label, add a label;
   convert any fall-through crossing edges (for blocks that do not contain
   a jump) to unconditional jumps.  */
1347 static void
1348 add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges)
1350 int i;
1351 basic_block src;
1352 basic_block dest;
1353 rtx label;
1354 rtx barrier;
1355 rtx new_jump;
1357 for (i=0; i < n_crossing_edges; i++)
1359 if (crossing_edges[i])
1361 src = crossing_edges[i]->src;
1362 dest = crossing_edges[i]->dest;
1364 /* Make sure dest has a label. */
1366 if (dest && (dest != EXIT_BLOCK_PTR))
1368 label = block_label (dest);
1370 /* Make sure source block ends with a jump. */
1372 if (src && (src != ENTRY_BLOCK_PTR))
1374 if (!JUMP_P (BB_END (src)))
1375 /* bb just falls through. */
1377 /* make sure there's only one successor */
1378 gcc_assert (EDGE_COUNT (src->succs) == 1);
1380 /* Find label in dest block. */
1381 label = block_label (dest);
1383 new_jump = emit_jump_insn_after (gen_jump (label),
1384 BB_END (src));
1385 barrier = emit_barrier_after (new_jump);
1386 JUMP_LABEL (new_jump) = label;
1387 LABEL_NUSES (label) += 1;
1388 src->rbi->footer = unlink_insn_chain (barrier, barrier);
1389 /* Mark edge as non-fallthru. */
1390 crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
1391 } /* end: 'if (GET_CODE ... ' */
1392 } /* end: 'if (src && src->index...' */
1393 } /* end: 'if (dest && dest->index...' */
1394 } /* end: 'if (crossing_edges[i]...' */
1395 } /* end for loop */
1398 /* Find any bb's where the fall-through edge is a crossing edge (note that
1399 these bb's must also contain a conditional jump; we've already
1400 dealt with fall-through edges for blocks that didn't have a
1401 conditional jump in the call to add_labels_and_missing_jumps).
1402 Convert the fall-through edge to non-crossing edge by inserting a
1403 new bb to fall-through into. The new bb will contain an
1404 unconditional jump (crossing edge) to the original fall through
1405 destination. */
1407 static void
1408 fix_up_fall_thru_edges (void)
1410 basic_block cur_bb;
1411 basic_block new_bb;
1412 edge succ1;
1413 edge succ2;
1414 edge fall_thru;
1415 edge cond_jump = NULL;
1416 edge e;
1417 bool cond_jump_crosses;
1418 int invert_worked;
1419 rtx old_jump;
1420 rtx fall_thru_label;
1421 rtx barrier;
1423 FOR_EACH_BB (cur_bb)
1425 fall_thru = NULL;
1426 if (EDGE_COUNT (cur_bb->succs) > 0)
1427 succ1 = EDGE_SUCC (cur_bb, 0);
1428 else
1429 succ1 = NULL;
1431 if (EDGE_COUNT (cur_bb->succs) > 1)
1432 succ2 = EDGE_SUCC (cur_bb, 1);
1433 else
1434 succ2 = NULL;
1436 /* Find the fall-through edge. */
1438 if (succ1
1439 && (succ1->flags & EDGE_FALLTHRU))
1441 fall_thru = succ1;
1442 cond_jump = succ2;
1444 else if (succ2
1445 && (succ2->flags & EDGE_FALLTHRU))
1447 fall_thru = succ2;
1448 cond_jump = succ1;
1451 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
1453 /* Check to see if the fall-thru edge is a crossing edge. */
1455 if (fall_thru->flags & EDGE_CROSSING)
1457 /* The fall_thru edge crosses; now check the cond jump edge, if
1458 it exists. */
1460 cond_jump_crosses = true;
1461 invert_worked = 0;
1462 old_jump = BB_END (cur_bb);
1464 /* Find the jump instruction, if there is one. */
1466 if (cond_jump)
1468 if (!(cond_jump->flags & EDGE_CROSSING))
1469 cond_jump_crosses = false;
1471 /* We know the fall-thru edge crosses; if the cond
1472 jump edge does NOT cross, and its destination is the
1473 next block in the bb order, invert the jump
1474 (i.e. fix it so the fall thru does not cross and
1475 the cond jump does). */
1477 if (!cond_jump_crosses
1478 && cur_bb->rbi->next == cond_jump->dest)
1480 /* Find label in fall_thru block. We've already added
1481 any missing labels, so there must be one. */
1483 fall_thru_label = block_label (fall_thru->dest);
1485 if (old_jump && fall_thru_label)
1486 invert_worked = invert_jump (old_jump,
1487 fall_thru_label,0);
1488 if (invert_worked)
1490 fall_thru->flags &= ~EDGE_FALLTHRU;
1491 cond_jump->flags |= EDGE_FALLTHRU;
1492 update_br_prob_note (cur_bb);
1493 e = fall_thru;
1494 fall_thru = cond_jump;
1495 cond_jump = e;
1496 cond_jump->flags |= EDGE_CROSSING;
1497 fall_thru->flags &= ~EDGE_CROSSING;
1502 if (cond_jump_crosses || !invert_worked)
1504 /* This is the case where both edges out of the basic
1505 block are crossing edges. Here we will fix up the
1506 fall through edge. The jump edge will be taken care
1507 of later. */
1509 new_bb = force_nonfallthru (fall_thru);
1511 if (new_bb)
1513 new_bb->rbi->next = cur_bb->rbi->next;
1514 cur_bb->rbi->next = new_bb;
1516 /* Make sure new fall-through bb is in same
1517 partition as bb it's falling through from. */
1519 BB_COPY_PARTITION (new_bb, cur_bb);
1520 EDGE_SUCC (new_bb, 0)->flags |= EDGE_CROSSING;
1523 /* Add barrier after new jump */
1525 if (new_bb)
1527 barrier = emit_barrier_after (BB_END (new_bb));
1528 new_bb->rbi->footer = unlink_insn_chain (barrier,
1529 barrier);
1531 else
1533 barrier = emit_barrier_after (BB_END (cur_bb));
1534 cur_bb->rbi->footer = unlink_insn_chain (barrier,
1535 barrier);
/* This function checks the destination block of a "crossing jump" to
1544 see if it has any crossing predecessors that begin with a code label
1545 and end with an unconditional jump. If so, it returns that predecessor
1546 block. (This is to avoid creating lots of new basic blocks that all
1547 contain unconditional jumps to the same destination). */
1549 static basic_block
1550 find_jump_block (basic_block jump_dest)
1552 basic_block source_bb = NULL;
1553 edge e;
1554 rtx insn;
1555 edge_iterator ei;
1557 FOR_EACH_EDGE (e, ei, jump_dest->preds)
1558 if (e->flags & EDGE_CROSSING)
1560 basic_block src = e->src;
1562 /* Check each predecessor to see if it has a label, and contains
1563 only one executable instruction, which is an unconditional jump.
1564 If so, we can use it. */
1566 if (LABEL_P (BB_HEAD (src)))
1567 for (insn = BB_HEAD (src);
1568 !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
1569 insn = NEXT_INSN (insn))
1571 if (INSN_P (insn)
1572 && insn == BB_END (src)
1573 && JUMP_P (insn)
1574 && !any_condjump_p (insn))
1576 source_bb = src;
1577 break;
1581 if (source_bb)
1582 break;
1585 return source_bb;
1588 /* Find all BB's with conditional jumps that are crossing edges;
1589 insert a new bb and make the conditional jump branch to the new
1590 bb instead (make the new bb same color so conditional branch won't
1591 be a 'crossing' edge). Insert an unconditional jump from the
1592 new bb to the original destination of the conditional jump. */
1594 static void
1595 fix_crossing_conditional_branches (void)
1597 basic_block cur_bb;
1598 basic_block new_bb;
1599 basic_block last_bb;
1600 basic_block dest;
1601 basic_block prev_bb;
1602 edge succ1;
1603 edge succ2;
1604 edge crossing_edge;
1605 edge new_edge;
1606 rtx old_jump;
1607 rtx set_src;
1608 rtx old_label = NULL_RTX;
1609 rtx new_label;
1610 rtx new_jump;
1611 rtx barrier;
1613 last_bb = EXIT_BLOCK_PTR->prev_bb;
1615 FOR_EACH_BB (cur_bb)
1617 crossing_edge = NULL;
1618 if (EDGE_COUNT (cur_bb->succs) > 0)
1619 succ1 = EDGE_SUCC (cur_bb, 0);
1620 else
1621 succ1 = NULL;
1623 if (EDGE_COUNT (cur_bb->succs) > 1)
1624 succ2 = EDGE_SUCC (cur_bb, 1);
1625 else
1626 succ2 = NULL;
1628 /* We already took care of fall-through edges, so only one successor
1629 can be a crossing edge. */
1631 if (succ1 && (succ1->flags & EDGE_CROSSING))
1632 crossing_edge = succ1;
1633 else if (succ2 && (succ2->flags & EDGE_CROSSING))
1634 crossing_edge = succ2;
1636 if (crossing_edge)
1638 old_jump = BB_END (cur_bb);
1640 /* Check to make sure the jump instruction is a
1641 conditional jump. */
1643 set_src = NULL_RTX;
1645 if (any_condjump_p (old_jump))
1647 if (GET_CODE (PATTERN (old_jump)) == SET)
1648 set_src = SET_SRC (PATTERN (old_jump));
1649 else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
1651 set_src = XVECEXP (PATTERN (old_jump), 0,0);
1652 if (GET_CODE (set_src) == SET)
1653 set_src = SET_SRC (set_src);
1654 else
1655 set_src = NULL_RTX;
1659 if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
1661 if (GET_CODE (XEXP (set_src, 1)) == PC)
1662 old_label = XEXP (set_src, 2);
1663 else if (GET_CODE (XEXP (set_src, 2)) == PC)
1664 old_label = XEXP (set_src, 1);
1666 /* Check to see if new bb for jumping to that dest has
1667 already been created; if so, use it; if not, create
1668 a new one. */
1670 new_bb = find_jump_block (crossing_edge->dest);
1672 if (new_bb)
1673 new_label = block_label (new_bb);
1674 else
1676 /* Create new basic block to be dest for
1677 conditional jump. */
1679 new_bb = create_basic_block (NULL, NULL, last_bb);
1680 new_bb->rbi->next = last_bb->rbi->next;
1681 last_bb->rbi->next = new_bb;
1682 prev_bb = last_bb;
1683 last_bb = new_bb;
1685 /* Update register liveness information. */
1687 new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
1688 new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
1689 COPY_REG_SET (new_bb->global_live_at_end,
1690 prev_bb->global_live_at_end);
1691 COPY_REG_SET (new_bb->global_live_at_start,
1692 prev_bb->global_live_at_end);
1694 /* Put appropriate instructions in new bb. */
1696 new_label = gen_label_rtx ();
1697 emit_label_before (new_label, BB_HEAD (new_bb));
1698 BB_HEAD (new_bb) = new_label;
1700 if (GET_CODE (old_label) == LABEL_REF)
1702 old_label = JUMP_LABEL (old_jump);
1703 new_jump = emit_jump_insn_after (gen_jump
1704 (old_label),
1705 BB_END (new_bb));
1707 else
1709 gcc_assert (HAVE_return
1710 && GET_CODE (old_label) == RETURN);
1711 new_jump = emit_jump_insn_after (gen_return (),
1712 BB_END (new_bb));
1715 barrier = emit_barrier_after (new_jump);
1716 JUMP_LABEL (new_jump) = old_label;
1717 new_bb->rbi->footer = unlink_insn_chain (barrier,
1718 barrier);
1720 /* Make sure new bb is in same partition as source
1721 of conditional branch. */
1722 BB_COPY_PARTITION (new_bb, cur_bb);
1725 /* Make old jump branch to new bb. */
1727 redirect_jump (old_jump, new_label, 0);
1729 /* Remove crossing_edge as predecessor of 'dest'. */
1731 dest = crossing_edge->dest;
1733 redirect_edge_succ (crossing_edge, new_bb);
1735 /* Make a new edge from new_bb to old dest; new edge
1736 will be a successor for new_bb and a predecessor
1737 for 'dest'. */
1739 if (EDGE_COUNT (new_bb->succs) == 0)
1740 new_edge = make_edge (new_bb, dest, 0);
1741 else
1742 new_edge = EDGE_SUCC (new_bb, 0);
1744 crossing_edge->flags &= ~EDGE_CROSSING;
1745 new_edge->flags |= EDGE_CROSSING;
1751 /* Find any unconditional branches that cross between hot and cold
1752 sections. Convert them into indirect jumps instead. */
1754 static void
1755 fix_crossing_unconditional_branches (void)
1757 basic_block cur_bb;
1758 rtx last_insn;
1759 rtx label;
1760 rtx label_addr;
1761 rtx indirect_jump_sequence;
1762 rtx jump_insn = NULL_RTX;
1763 rtx new_reg;
1764 rtx cur_insn;
1765 edge succ;
1767 FOR_EACH_BB (cur_bb)
1769 last_insn = BB_END (cur_bb);
1770 succ = EDGE_SUCC (cur_bb, 0);
1772 /* Check to see if bb ends in a crossing (unconditional) jump. At
1773 this point, no crossing jumps should be conditional. */
1775 if (JUMP_P (last_insn)
1776 && (succ->flags & EDGE_CROSSING))
1778 rtx label2, table;
1780 gcc_assert (!any_condjump_p (last_insn));
1782 /* Make sure the jump is not already an indirect or table jump. */
1784 if (!computed_jump_p (last_insn)
1785 && !tablejump_p (last_insn, &label2, &table))
1787 /* We have found a "crossing" unconditional branch. Now
1788 we must convert it to an indirect jump. First create
1789 reference of label, as target for jump. */
1791 label = JUMP_LABEL (last_insn);
1792 label_addr = gen_rtx_LABEL_REF (Pmode, label);
1793 LABEL_NUSES (label) += 1;
1795 /* Get a register to use for the indirect jump. */
1797 new_reg = gen_reg_rtx (Pmode);
/* Generate the indirect jump sequence.  */
1801 start_sequence ();
1802 emit_move_insn (new_reg, label_addr);
1803 emit_indirect_jump (new_reg);
1804 indirect_jump_sequence = get_insns ();
1805 end_sequence ();
1807 /* Make sure every instruction in the new jump sequence has
1808 its basic block set to be cur_bb. */
1810 for (cur_insn = indirect_jump_sequence; cur_insn;
1811 cur_insn = NEXT_INSN (cur_insn))
1813 BLOCK_FOR_INSN (cur_insn) = cur_bb;
1814 if (JUMP_P (cur_insn))
1815 jump_insn = cur_insn;
1818 /* Insert the new (indirect) jump sequence immediately before
1819 the unconditional jump, then delete the unconditional jump. */
1821 emit_insn_before (indirect_jump_sequence, last_insn);
1822 delete_insn (last_insn);
1824 /* Make BB_END for cur_bb be the jump instruction (NOT the
1825 barrier instruction at the end of the sequence...). */
1827 BB_END (cur_bb) = jump_insn;
1833 /* Add REG_CROSSING_JUMP note to all crossing jump insns. */
1835 static void
1836 add_reg_crossing_jump_notes (void)
1838 basic_block bb;
1839 edge e;
1840 edge_iterator ei;
1842 FOR_EACH_BB (bb)
1843 FOR_EACH_EDGE (e, ei, bb->succs)
1844 if ((e->flags & EDGE_CROSSING)
1845 && JUMP_P (BB_END (e->src)))
1846 REG_NOTES (BB_END (e->src)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
1847 NULL_RTX,
1848 REG_NOTES (BB_END
1849 (e->src)));
1852 /* Basic blocks containing NOTE_INSN_UNLIKELY_EXECUTED_CODE will be
1853 put in a separate section of the .o file, to reduce paging and
1854 improve cache performance (hopefully). This can result in bits of
1855 code from the same function being widely separated in the .o file.
However, the current basic-block structure is not aware of this separation.  Therefore
1857 we must take care to ensure that: 1). There are no fall_thru edges
1858 that cross between sections; 2). For those architectures which
1859 have "short" conditional branches, all conditional branches that
1860 attempt to cross between sections are converted to unconditional
1861 branches; and, 3). For those architectures which have "short"
1862 unconditional branches, all unconditional branches that attempt
1863 to cross between sections are converted to indirect jumps.
1865 The code for fixing up fall_thru edges that cross between hot and
1866 cold basic blocks does so by creating new basic blocks containing
1867 unconditional branches to the appropriate label in the "other"
1868 section. The new basic block is then put in the same (hot or cold)
1869 section as the original conditional branch, and the fall_thru edge
1870 is modified to fall into the new basic block instead. By adding
1871 this level of indirection we end up with only unconditional branches
1872 crossing between hot and cold sections.
1874 Conditional branches are dealt with by adding a level of indirection.
1875 A new basic block is added in the same (hot/cold) section as the
1876 conditional branch, and the conditional branch is retargeted to the
1877 new basic block. The new basic block contains an unconditional branch
1878 to the original target of the conditional branch (in the other section).
1880 Unconditional branches are dealt with by converting them into
1881 indirect jumps. */
1883 static void
1884 fix_edges_for_rarely_executed_code (edge *crossing_edges,
1885 int n_crossing_edges)
1887 /* Make sure the source of any crossing edge ends in a jump and the
1888 destination of any crossing edge has a label. */
1890 add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);
1892 /* Convert all crossing fall_thru edges to non-crossing fall
1893 thrus to unconditional jumps (that jump to the original fall
1894 thru dest). */
1896 fix_up_fall_thru_edges ();
1898 /* Only do the parts necessary for writing separate sections if
1899 the target architecture has the ability to write separate sections
1900 (i.e. it has named sections). Otherwise, the hot/cold partitioning
1901 information will be used when reordering blocks to try to put all
1902 the hot blocks together, then all the cold blocks, but no actual
1903 section partitioning will be done. */
1905 if (targetm.have_named_sections)
1907 /* If the architecture does not have conditional branches that can
1908 span all of memory, convert crossing conditional branches into
1909 crossing unconditional branches. */
1911 if (!HAS_LONG_COND_BRANCH)
1912 fix_crossing_conditional_branches ();
1914 /* If the architecture does not have unconditional branches that
1915 can span all of memory, convert crossing unconditional branches
1916 into indirect jumps. Since adding an indirect jump also adds
1917 a new register usage, update the register usage information as
1918 well. */
1920 if (!HAS_LONG_UNCOND_BRANCH)
1922 fix_crossing_unconditional_branches ();
1923 reg_scan (get_insns(), max_reg_num (), 1);
1926 add_reg_crossing_jump_notes ();
1930 /* Reorder basic blocks. The main entry point to this file. FLAGS is
1931 the set of flags to pass to cfg_layout_initialize(). */
1933 void
1934 reorder_basic_blocks (unsigned int flags)
1936 int n_traces;
1937 int i;
1938 struct trace *traces;
1940 if (n_basic_blocks <= 1)
1941 return;
1943 if (targetm.cannot_modify_jumps_p ())
1944 return;
1946 timevar_push (TV_REORDER_BLOCKS);
1948 cfg_layout_initialize (flags);
1950 set_edge_can_fallthru_flag ();
1951 mark_dfs_back_edges ();
1953 /* We are estimating the length of uncond jump insn only once since the code
1954 for getting the insn length always returns the minimal length now. */
1955 if (uncond_jump_length == 0)
1956 uncond_jump_length = get_uncond_jump_length ();
1958 /* We need to know some information for each basic block. */
1959 array_size = GET_ARRAY_SIZE (last_basic_block);
1960 bbd = xmalloc (array_size * sizeof (bbro_basic_block_data));
1961 for (i = 0; i < array_size; i++)
1963 bbd[i].start_of_trace = -1;
1964 bbd[i].end_of_trace = -1;
1965 bbd[i].heap = NULL;
1966 bbd[i].node = NULL;
1969 traces = xmalloc (n_basic_blocks * sizeof (struct trace));
1970 n_traces = 0;
1971 find_traces (&n_traces, traces);
1972 connect_traces (n_traces, traces);
1973 FREE (traces);
1974 FREE (bbd);
1976 if (dump_file)
1977 dump_flow_info (dump_file);
1979 if (flag_reorder_blocks_and_partition
1980 && targetm.have_named_sections)
1981 add_unlikely_executed_notes ();
1983 cfg_layout_finalize ();
1985 timevar_pop (TV_REORDER_BLOCKS);
1988 /* This function is the main 'entrance' for the optimization that
1989 partitions hot and cold basic blocks into separate sections of the
1990 .o file (to improve performance and cache locality). Ideally it
1991 would be called after all optimizations that rearrange the CFG have
1992 been called. However part of this optimization may introduce new
1993 register usage, so it must be called before register allocation has
1994 occurred. This means that this optimization is actually called
1995 well before the optimization that reorders basic blocks (see
1996 function above).
1998 This optimization checks the feedback information to determine
1999 which basic blocks are hot/cold and causes reorder_basic_blocks to
2000 add NOTE_INSN_UNLIKELY_EXECUTED_CODE to non-hot basic blocks. The
2001 presence or absence of this note is later used for writing out
2002 sections in the .o file. Because hot and cold sections can be
2003 arbitrarily large (within the bounds of memory), far beyond the
2004 size of a single function, it is necessary to fix up all edges that
2005 cross section boundaries, to make sure the instructions used can
2006 actually span the required distance. The fixes are described
2007 below.
2009 Fall-through edges must be changed into jumps; it is not safe or
2010 legal to fall through across a section boundary. Whenever a
2011 fall-through edge crossing a section boundary is encountered, a new
2012 basic block is inserted (in the same section as the fall-through
2013 source), and the fall through edge is redirected to the new basic
2014 block. The new basic block contains an unconditional jump to the
2015 original fall-through target. (If the unconditional jump is
2016 insufficient to cross section boundaries, that is dealt with a
2017 little later, see below).
2019 In order to deal with architectures that have short conditional
2020 branches (which cannot span all of memory) we take any conditional
2021 jump that attempts to cross a section boundary and add a level of
2022 indirection: it becomes a conditional jump to a new basic block, in
2023 the same section. The new basic block contains an unconditional
2024 jump to the original target, in the other section.
2026 For those architectures whose unconditional branch is also
2027 incapable of reaching all of memory, those unconditional jumps are
2028 converted into indirect jumps, through a register.
2030 IMPORTANT NOTE: This optimization causes some messy interactions
2031 with the cfg cleanup optimizations; those optimizations want to
2032 merge blocks wherever possible, and to collapse indirect jump
2033 sequences (change "A jumps to B jumps to C" directly into "A jumps
2034 to C"). Those optimizations can undo the jump fixes that
2035 partitioning is required to make (see above), in order to ensure
2036 that jumps attempting to cross section boundaries are really able
2037 to cover whatever distance the jump requires (on many architectures
2038 conditional or unconditional jumps are not able to reach all of
2039 memory). Therefore tests have to be inserted into each such
2040 optimization to make sure that it does not undo stuff necessary to
2041 cross partition boundaries. This would be much less of a problem
2042 if we could perform this optimization later in the compilation, but
2043 unfortunately the fact that we may need to create indirect jumps
2044 (through registers) requires that this optimization be performed
2045 before register allocation. */
2047 void
2048 partition_hot_cold_basic_blocks (void)
2050 basic_block cur_bb;
2051 edge *crossing_edges;
2052 int n_crossing_edges;
2053 int max_edges = 2 * last_basic_block;
2055 if (n_basic_blocks <= 1)
2056 return;
2058 crossing_edges = xcalloc (max_edges, sizeof (edge));
2060 cfg_layout_initialize (0);
2062 FOR_EACH_BB (cur_bb)
2063 if (cur_bb->index >= 0
2064 && cur_bb->next_bb->index >= 0)
2065 cur_bb->rbi->next = cur_bb->next_bb;
2067 find_rarely_executed_basic_blocks_and_crossing_edges (crossing_edges,
2068 &n_crossing_edges,
2069 &max_edges);
2071 if (n_crossing_edges > 0)
2072 fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);
2074 free (crossing_edges);
2076 cfg_layout_finalize();