/* Basic block reordering routines for the GNU compiler.
   Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011,
   2012 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* This (greedy) algorithm constructs traces in several rounds.
   The construction starts from "seeds".  The seed for the first round
   is the entry point of the function.  When there is more than one seed,
   the one with the lowest key in the heap is selected first (see bb_to_key).
   Then the algorithm repeatedly adds the most probable successor to the end
   of a trace.  Finally it connects the traces.

   There are two parameters: Branch Threshold and Exec Threshold.
   If the probability of an edge to a successor of the current basic block is
   lower than Branch Threshold or its frequency is lower than Exec Threshold,
   then the successor will be the seed in one of the next rounds.
   Each round has these parameters lower than the previous one.
   The last round has to have these parameters set to zero so that the
   remaining blocks are picked up.

   The algorithm selects the most probable successor from all unvisited
   successors and successors that have been added to this trace.
   The other successors (that have not been "sent" to the next round) will be
   other seeds for this round and the secondary traces will start from them.
   If the successor has not been visited in this trace, it is added to the
   trace (however, there is some heuristic for simple branches).
   If the successor has been visited in this trace, a loop has been found.
   If the loop has many iterations, the loop is rotated so that the source
   block of the most probable edge going out of the loop is the last block
   of the trace.
   If the loop has few iterations and there is no edge from the last block of
   the loop going out of the loop, the loop header is duplicated.

   When connecting traces, the algorithm first checks whether there is an edge
   from the last block of a trace to the first block of another trace.
   When there are still some unconnected traces it checks whether there exists
   a basic block BB such that BB is a successor of the last block of a trace
   and BB is a predecessor of the first block of another trace.  In this case,
   BB is duplicated, added at the end of the first trace and the traces are
   connected through it.
   The remaining traces are simply connected, so there will be a jump to the
   beginning of each such trace.

   The above description is for the full algorithm, which is used when the
   function is optimized for speed.  When the function is optimized for size,
   in order to reduce long jumps and connect more fallthru edges, the
   algorithm is modified as follows:
   (1) Break long traces into short ones.  A trace is broken at a block that
   has multiple predecessors/successors during trace discovery.  When
   connecting traces, only connect Trace n with Trace n + 1.  This change
   reduces most long jumps compared with the above algorithm.
   (2) Ignore the edge probability and frequency for fallthru edges.
   (3) Keep the original order of blocks when there is no chance to fall
   through.  We rely on the results of cfg_cleanup.

   To implement the change for code size optimization, the block's index is
   selected as the key and all traces are found in one round.

   References:

   "Software Trace Cache"
   A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
   http://citeseer.nj.nec.com/15361.html

*/
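
/* Illustrative sketch only (not part of GCC): the round structure described
   above, in miniature.  seed_heap_empty, extract_min_seed and grow_trace_from
   are hypothetical stand-ins for the fibheap calls and find_traces_1_round
   defined below.  */
#if 0
static void
find_traces_sketch (void)
{
  int round;

  for (round = 0; round < N_ROUNDS; round++)
    /* Each round revisits the seeds that were "sent forward" by earlier,
       stricter rounds; the last round has zero thresholds, so every
       remaining block ends up in some trace.  */
    while (!seed_heap_empty ())
      {
        basic_block seed = extract_min_seed ();  /* lowest bb_to_key value */
        grow_trace_from (seed, branch_threshold[round],
                         exec_threshold[round]);
      }
}
#endif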

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "flags.h"
#include "output.h"
#include "fibheap.h"
#include "target.h"
#include "function.h"
#include "tm_p.h"
#include "obstack.h"
#include "expr.h"
#include "params.h"
#include "diagnostic-core.h"
#include "toplev.h" /* user_defined_section_attribute */
#include "tree-pass.h"
#include "df.h"
#include "bb-reorder.h"
#include "except.h"

/* The number of rounds.  In most cases there will only be 4 rounds, but
   when partitioning hot and cold basic blocks into separate sections of
   the object file there will be an extra round.  */
#define N_ROUNDS 5

/* Stubs in case we don't have a return insn.
   We have to check at run time too, not only compile time.  */

#ifndef HAVE_return
#define HAVE_return 0
#define gen_return() NULL_RTX
#endif

struct target_bb_reorder default_target_bb_reorder;
#if SWITCHABLE_TARGET
struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
#endif

#define uncond_jump_length \
  (this_target_bb_reorder->x_uncond_jump_length)

/* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE.  */
static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};

/* Exec thresholds in thousandths (per mille) of the frequency of bb 0.  */
static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};
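
/* Worked example (assuming the usual REG_BR_PROB_BASE of 10000): in round 0
   an edge needs a probability of at least 400 per mille, i.e. 4000/10000 =
   40%, and a successor needs a frequency of at least 50% of the hottest
   entry block, to extend a trace; rounds 3 and 4 accept everything.  */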

/* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
   block the edge destination is not duplicated while connecting traces.  */
#define DUPLICATION_THRESHOLD 100

/* Structure to hold needed information for each basic block.  */
typedef struct bbro_basic_block_data_def
{
  /* Which trace is the bb start of (-1 means it is not a start of any).  */
  int start_of_trace;

  /* Which trace is the bb end of (-1 means it is not an end of any).  */
  int end_of_trace;

  /* Which trace is the bb in?  */
  int in_trace;

  /* Which trace was this bb visited in?  */
  int visited;

  /* Which heap is BB in (if any)?  */
  fibheap_t heap;

  /* Which heap node is BB in (if any)?  */
  fibnode_t node;
} bbro_basic_block_data;

/* The current size of the following dynamic array.  */
static int array_size;

/* The array which holds needed information for basic blocks.  */
static bbro_basic_block_data *bbd;

/* To avoid frequent reallocation the size of arrays is greater than needed,
   the number of elements is (not less than) 1.25 * size_wanted.  */
#define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
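
/* E.g. GET_ARRAY_SIZE (100) = ((100 / 4) + 1) * 5 = 130; the "+ 1" makes the
   result strictly greater than 1.25 * X even when X is a multiple of 4.  */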

/* Free the memory and set the pointer to NULL.  */
#define FREE(P) (gcc_assert (P), free (P), P = 0)

/* Structure for holding information about a trace.  */
struct trace
{
  /* First and last basic block of the trace.  */
  basic_block first, last;

  /* The round of the STC creation which this trace was found in.  */
  int round;

  /* The length (i.e. the number of basic blocks) of the trace.  */
  int length;
};

/* Maximum frequency and count of one of the entry blocks.  */
static int max_entry_frequency;
static gcov_type max_entry_count;

/* Local function prototypes.  */
static void find_traces (int *, struct trace *);
static basic_block rotate_loop (edge, struct trace *, int);
static void mark_bb_visited (basic_block, int);
static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
				 int, fibheap_t *, int);
static basic_block copy_bb (basic_block, edge, basic_block, int);
static fibheapkey_t bb_to_key (basic_block);
static bool better_edge_p (const_basic_block, const_edge, int, int, int, int,
			   const_edge);
static bool connect_better_edge_p (const_edge, bool, int, const_edge,
				   struct trace *);
static void connect_traces (int, struct trace *);
static bool copy_bb_p (const_basic_block, int);
static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);

/* Return the trace number in which BB was visited.  */

static int
bb_visited_trace (const_basic_block bb)
{
  gcc_assert (bb->index < array_size);
  return bbd[bb->index].visited;
}

/* This function marks BB that it was visited in trace number TRACE.  */

static void
mark_bb_visited (basic_block bb, int trace)
{
  bbd[bb->index].visited = trace;
  if (bbd[bb->index].heap)
    {
      fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
      bbd[bb->index].heap = NULL;
      bbd[bb->index].node = NULL;
    }
}

/* Check to see if bb should be pushed into the next round of trace
   collections or not.  Reasons for pushing the block forward are 1).
   If the block is cold, we are doing partitioning, and there will be
   another round (cold partition blocks are not supposed to be
   collected into traces until the very last round); or 2). There will
   be another round, and the basic block is not "hot enough" for the
   current round of trace collection.  */

static bool
push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds,
		      int exec_th, gcov_type count_th)
{
  bool there_exists_another_round;
  bool block_not_hot_enough;

  there_exists_another_round = round < number_of_rounds - 1;

  block_not_hot_enough = (bb->frequency < exec_th
			  || bb->count < count_th
			  || probably_never_executed_bb_p (cfun, bb));

  if (there_exists_another_round
      && block_not_hot_enough)
    return true;
  else
    return false;
}

/* Find the traces for Software Trace Cache.  Chain each trace through
   RBI()->next.  Store the number of traces to N_TRACES and description of
   traces to TRACES.  */

static void
find_traces (int *n_traces, struct trace *traces)
{
  int i;
  int number_of_rounds;
  edge e;
  edge_iterator ei;
  fibheap_t heap;

  /* Add one extra round of trace collection when partitioning hot/cold
     basic blocks into separate sections.  The last round is for all the
     cold blocks (and ONLY the cold blocks).  */

  number_of_rounds = N_ROUNDS - 1;

  /* Insert entry points of function into heap.  */
  heap = fibheap_new ();
  max_entry_frequency = 0;
  max_entry_count = 0;
  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    {
      bbd[e->dest->index].heap = heap;
      bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest),
						 e->dest);
      if (e->dest->frequency > max_entry_frequency)
	max_entry_frequency = e->dest->frequency;
      if (e->dest->count > max_entry_count)
	max_entry_count = e->dest->count;
    }

  /* Find the traces.  */
  for (i = 0; i < number_of_rounds; i++)
    {
      gcov_type count_threshold;

      if (dump_file)
	fprintf (dump_file, "STC - round %d\n", i + 1);
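
      /* Note: when max_entry_count exceeds INT_MAX / 1000, scaling it by
	 the per-mille threshold first could overflow, so in that case we
	 divide by 1000 first, at a small cost in precision.  */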
      if (max_entry_count < INT_MAX / 1000)
	count_threshold = max_entry_count * exec_threshold[i] / 1000;
      else
	count_threshold = max_entry_count / 1000 * exec_threshold[i];

      find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
			   max_entry_frequency * exec_threshold[i] / 1000,
			   count_threshold, traces, n_traces, i, &heap,
			   number_of_rounds);
    }
  fibheap_delete (heap);

  if (dump_file)
    {
      for (i = 0; i < *n_traces; i++)
	{
	  basic_block bb;
	  fprintf (dump_file, "Trace %d (round %d):  ", i + 1,
		   traces[i].round + 1);
	  for (bb = traces[i].first;
	       bb != traces[i].last;
	       bb = (basic_block) bb->aux)
	    fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
	  fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
	}
      fflush (dump_file);
    }
}

/* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE
   (with sequential number TRACE_N).  */

static basic_block
rotate_loop (edge back_edge, struct trace *trace, int trace_n)
{
  basic_block bb;

  /* Information about the best end (end after rotation) of the loop.  */
  basic_block best_bb = NULL;
  edge best_edge = NULL;
  int best_freq = -1;
  gcov_type best_count = -1;
  /* The best edge is preferred when its destination is not visited yet
     or is a start block of some trace.  */
  bool is_preferred = false;

  /* Find the most frequent edge that goes out from current trace.  */
  bb = back_edge->dest;
  do
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR
	    && bb_visited_trace (e->dest) != trace_n
	    && (e->flags & EDGE_CAN_FALLTHRU)
	    && !(e->flags & EDGE_COMPLEX))
	  {
	    if (is_preferred)
	      {
		/* The best edge is preferred.  */
		if (!bb_visited_trace (e->dest)
		    || bbd[e->dest->index].start_of_trace >= 0)
		  {
		    /* The current edge E is also preferred.  */
		    int freq = EDGE_FREQUENCY (e);
		    if (freq > best_freq || e->count > best_count)
		      {
			best_freq = freq;
			best_count = e->count;
			best_edge = e;
			best_bb = bb;
		      }
		  }
	      }
	    else
	      {
		if (!bb_visited_trace (e->dest)
		    || bbd[e->dest->index].start_of_trace >= 0)
		  {
		    /* The current edge E is preferred.  */
		    is_preferred = true;
		    best_freq = EDGE_FREQUENCY (e);
		    best_count = e->count;
		    best_edge = e;
		    best_bb = bb;
		  }
		else
		  {
		    int freq = EDGE_FREQUENCY (e);
		    if (!best_edge || freq > best_freq || e->count > best_count)
		      {
			best_freq = freq;
			best_count = e->count;
			best_edge = e;
			best_bb = bb;
		      }
		  }
	      }
	  }
      bb = (basic_block) bb->aux;
    }
  while (bb != back_edge->dest);

  if (best_bb)
    {
      /* Rotate the loop so that the BEST_EDGE goes out from the last block of
	 the trace.  */
      if (back_edge->dest == trace->first)
	trace->first = (basic_block) best_bb->aux;
      else
	{
	  basic_block prev_bb;

	  for (prev_bb = trace->first;
	       prev_bb->aux != back_edge->dest;
	       prev_bb = (basic_block) prev_bb->aux)
	    ;
	  prev_bb->aux = best_bb->aux;

	  /* Try to get rid of uncond jump to cond jump.  */
	  if (single_succ_p (prev_bb))
	    {
	      basic_block header = single_succ (prev_bb);

	      /* Duplicate HEADER if it is a small block containing cond jump
		 in the end.  */
	      if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
		  && !find_reg_note (BB_END (header), REG_CROSSING_JUMP,
				     NULL_RTX))
		copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
	    }
	}
    }
  else
    {
      /* We have not found suitable loop tail so do no rotation.  */
      best_bb = back_edge->src;
    }
  best_bb->aux = NULL;
  return best_bb;
}
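
/* Illustrative example (not from the sources): for a cyclic trace
   H -> A -> B -> H whose most frequent exit edge leaves from A, rotate_loop
   makes A the last block, yielding the chain B -> H -> A, so the hot exit
   becomes a fall-through opportunity at the end of the trace.  */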

/* One round of finding traces.  Find traces for BRANCH_TH and EXEC_TH i.e. do
   not include basic blocks whose probability is lower than BRANCH_TH or whose
   frequency is lower than EXEC_TH into traces (or whose count is lower than
   COUNT_TH).  Store the new traces into TRACES and modify the number of
   traces *N_TRACES.  Set the round (which the trace belongs to) to ROUND.
   The function expects starting basic blocks to be in *HEAP and will delete
   *HEAP and store starting points for the next round into new *HEAP.  */

static void
find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
		     struct trace *traces, int *n_traces, int round,
		     fibheap_t *heap, int number_of_rounds)
{
  /* Heap for discarded basic blocks which are possible starting points for
     the next round.  */
  fibheap_t new_heap = fibheap_new ();
  bool for_size = optimize_function_for_size_p (cfun);

  while (!fibheap_empty (*heap))
    {
      basic_block bb;
      struct trace *trace;
      edge best_edge, e;
      fibheapkey_t key;
      edge_iterator ei;

      bb = (basic_block) fibheap_extract_min (*heap);
      bbd[bb->index].heap = NULL;
      bbd[bb->index].node = NULL;

      if (dump_file)
	fprintf (dump_file, "Getting bb %d\n", bb->index);

      /* If the BB's frequency is too low, send BB to the next round.  When
	 partitioning hot/cold blocks into separate sections, make sure all
	 the cold blocks (and ONLY the cold blocks) go into the (extra) final
	 round.  When optimizing for size, do not push to next round.  */

      if (!for_size
	  && push_to_next_round_p (bb, round, number_of_rounds, exec_th,
				   count_th))
	{
	  int key = bb_to_key (bb);
	  bbd[bb->index].heap = new_heap;
	  bbd[bb->index].node = fibheap_insert (new_heap, key, bb);

	  if (dump_file)
	    fprintf (dump_file,
		     "  Possible start point of next round: %d (key: %d)\n",
		     bb->index, key);
	  continue;
	}

      trace = traces + *n_traces;
      trace->first = bb;
      trace->round = round;
      trace->length = 0;
      bbd[bb->index].in_trace = *n_traces;
      (*n_traces)++;

      do
	{
	  int prob, freq;
	  bool ends_in_call;

	  /* The probability and frequency of the best edge.  */
	  int best_prob = INT_MIN / 2;
	  int best_freq = INT_MIN / 2;

	  best_edge = NULL;
	  mark_bb_visited (bb, *n_traces);
	  trace->length++;

	  if (dump_file)
	    fprintf (dump_file, "Basic block %d was visited in trace %d\n",
		     bb->index, *n_traces - 1);

	  ends_in_call = block_ends_with_call_p (bb);

	  /* Select the successor that will be placed after BB.  */
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      gcc_assert (!(e->flags & EDGE_FAKE));

	      if (e->dest == EXIT_BLOCK_PTR)
		continue;

	      if (bb_visited_trace (e->dest)
		  && bb_visited_trace (e->dest) != *n_traces)
		continue;

	      if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
		continue;

	      prob = e->probability;
	      freq = e->dest->frequency;

	      /* The only sensible preference for a call instruction is the
		 fallthru edge.  Don't bother selecting anything else.  */
	      if (ends_in_call)
		{
		  if (e->flags & EDGE_CAN_FALLTHRU)
		    {
		      best_edge = e;
		      best_prob = prob;
		      best_freq = freq;
		    }
		  continue;
		}

	      /* Edge that cannot be fallthru or improbable or infrequent
		 successor (i.e. it is unsuitable successor).  When optimizing
		 for size, ignore the probability and frequency.  */
	      if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
		  || ((prob < branch_th || EDGE_FREQUENCY (e) < exec_th
		       || e->count < count_th) && (!for_size)))
		continue;

	      /* If partitioning hot/cold basic blocks, don't consider edges
		 that cross section boundaries.  */

	      if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
				 best_edge))
		{
		  best_edge = e;
		  best_prob = prob;
		  best_freq = freq;
		}
	    }

	  /* If the best destination has multiple predecessors, and can be
	     duplicated cheaper than a jump, don't allow it to be added
	     to a trace.  We'll duplicate it when connecting traces.  */
	  if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
	      && copy_bb_p (best_edge->dest, 0))
	    best_edge = NULL;

	  /* If the best destination has multiple successors or predecessors,
	     don't allow it to be added when optimizing for size.  This makes
	     sure predecessors with smaller index are handled before the best
	     destination.  It breaks long traces and reduces long jumps.

	     Take if-then-else as an example.
		A
	       / \
	      B   C
	       \ /
		D

	     If we do not remove the best edge B->D/C->D, the final order might
	     be A B D ... C.  C is at the end of the program.  If D's successors
	     and D are complicated, we might need long jumps for A->C and C->D.
	     Similar issue for order: A C D ... B.

	     After removing the best edge, the final result will be ABCD/ ACBD.
	     It does not add jump compared with the previous order.  But it
	     reduces the possibility of long jumps.  */
	  if (best_edge && for_size
	      && (EDGE_COUNT (best_edge->dest->succs) > 1
		  || EDGE_COUNT (best_edge->dest->preds) > 1))
	    best_edge = NULL;

	  /* Add all non-selected successors to the heaps.  */
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (e == best_edge
		  || e->dest == EXIT_BLOCK_PTR
		  || bb_visited_trace (e->dest))
		continue;

	      key = bb_to_key (e->dest);

	      if (bbd[e->dest->index].heap)
		{
		  /* E->DEST is already in some heap.  */
		  if (key != bbd[e->dest->index].node->key)
		    {
		      if (dump_file)
			{
			  fprintf (dump_file,
				   "Changing key for bb %d from %ld to %ld.\n",
				   e->dest->index,
				   (long) bbd[e->dest->index].node->key,
				   key);
			}
		      fibheap_replace_key (bbd[e->dest->index].heap,
					   bbd[e->dest->index].node, key);
		    }
		}
	      else
		{
		  fibheap_t which_heap = *heap;

		  prob = e->probability;
		  freq = EDGE_FREQUENCY (e);

		  if (!(e->flags & EDGE_CAN_FALLTHRU)
		      || (e->flags & EDGE_COMPLEX)
		      || prob < branch_th || freq < exec_th
		      || e->count < count_th)
		    {
		      /* When partitioning hot/cold basic blocks, make sure
			 the cold blocks (and only the cold blocks) all get
			 pushed to the last round of trace collection.  When
			 optimizing for size, do not push to next round.  */

		      if (!for_size && push_to_next_round_p (e->dest, round,
							     number_of_rounds,
							     exec_th, count_th))
			which_heap = new_heap;
		    }

		  bbd[e->dest->index].heap = which_heap;
		  bbd[e->dest->index].node = fibheap_insert (which_heap,
							     key, e->dest);

		  if (dump_file)
		    {
		      fprintf (dump_file,
			       "  Possible start of %s round: %d (key: %ld)\n",
			       (which_heap == new_heap) ? "next" : "this",
			       e->dest->index, (long) key);
		    }
		}
	    }

	  if (best_edge) /* Suitable successor was found.  */
	    {
	      if (bb_visited_trace (best_edge->dest) == *n_traces)
		{
		  /* We do nothing with one basic block loops.  */
		  if (best_edge->dest != bb)
		    {
		      if (EDGE_FREQUENCY (best_edge)
			  > 4 * best_edge->dest->frequency / 5)
			{
			  /* The loop has at least 4 iterations.  If the loop
			     header is not the first block of the function
			     we can rotate the loop.  */

			  if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb)
			    {
			      if (dump_file)
				{
				  fprintf (dump_file,
					   "Rotating loop %d - %d\n",
					   best_edge->dest->index, bb->index);
				}
			      bb->aux = best_edge->dest;
			      bbd[best_edge->dest->index].in_trace =
							     (*n_traces) - 1;
			      bb = rotate_loop (best_edge, trace, *n_traces);
			    }
			}
		      else
			{
			  /* The loop has less than 4 iterations.  */

			  if (single_succ_p (bb)
			      && copy_bb_p (best_edge->dest,
					    optimize_edge_for_speed_p
					    (best_edge)))
			    {
			      bb = copy_bb (best_edge->dest, best_edge, bb,
					    *n_traces);
			      trace->length++;
			    }
			}
		    }

		  /* Terminate the trace.  */
		  break;
		}
	      else
		{
		  /* Check for a situation

		    A
		   /|
		  B |
		   \|
		    C

		  where
		  EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
		    >= EDGE_FREQUENCY (AC).
		  (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
		  Best ordering is then A B C.

		  When optimizing for size, A B C is always the best order.

		  This situation is created for example by:

		  if (A) B;
		  C;

		  */

		  FOR_EACH_EDGE (e, ei, bb->succs)
		    if (e != best_edge
			&& (e->flags & EDGE_CAN_FALLTHRU)
			&& !(e->flags & EDGE_COMPLEX)
			&& !bb_visited_trace (e->dest)
			&& single_pred_p (e->dest)
			&& !(e->flags & EDGE_CROSSING)
			&& single_succ_p (e->dest)
			&& (single_succ_edge (e->dest)->flags
			    & EDGE_CAN_FALLTHRU)
			&& !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
			&& single_succ (e->dest) == best_edge->dest
			&& (2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)
			    || for_size))
		      {
			best_edge = e;
			if (dump_file)
			  fprintf (dump_file, "Selecting BB %d\n",
				   best_edge->dest->index);
			break;
		      }

		  bb->aux = best_edge->dest;
		  bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
		  bb = best_edge->dest;
		}
	    }
	}
      while (best_edge);
      trace->last = bb;
      bbd[trace->first->index].start_of_trace = *n_traces - 1;
      bbd[trace->last->index].end_of_trace = *n_traces - 1;

      /* The trace is terminated so we have to recount the keys in heap
	 (some block can have a lower key because now one of its predecessors
	 is an end of the trace).  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (e->dest == EXIT_BLOCK_PTR
	      || bb_visited_trace (e->dest))
	    continue;

	  if (bbd[e->dest->index].heap)
	    {
	      key = bb_to_key (e->dest);
	      if (key != bbd[e->dest->index].node->key)
		{
		  if (dump_file)
		    {
		      fprintf (dump_file,
			       "Changing key for bb %d from %ld to %ld.\n",
			       e->dest->index,
			       (long) bbd[e->dest->index].node->key, key);
		    }
		  fibheap_replace_key (bbd[e->dest->index].heap,
				       bbd[e->dest->index].node,
				       key);
		}
	    }
	}
    }

  fibheap_delete (*heap);

  /* "Return" the new heap.  */
  *heap = new_heap;
}

/* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
   it to trace after BB, mark OLD_BB visited and update pass' data structures
   (TRACE is a number of trace which OLD_BB is duplicated to).  */

static basic_block
copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
{
  basic_block new_bb;

  new_bb = duplicate_block (old_bb, e, bb);
  BB_COPY_PARTITION (new_bb, old_bb);

  gcc_assert (e->dest == new_bb);

  if (dump_file)
    fprintf (dump_file,
	     "Duplicated bb %d (created bb %d)\n",
	     old_bb->index, new_bb->index);

  if (new_bb->index >= array_size || last_basic_block > array_size)
    {
      int i;
      int new_size;

      new_size = MAX (last_basic_block, new_bb->index + 1);
      new_size = GET_ARRAY_SIZE (new_size);
      bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
      for (i = array_size; i < new_size; i++)
	{
	  bbd[i].start_of_trace = -1;
	  bbd[i].end_of_trace = -1;
	  bbd[i].in_trace = -1;
	  bbd[i].visited = 0;
	  bbd[i].heap = NULL;
	  bbd[i].node = NULL;
	}
      array_size = new_size;

      if (dump_file)
	{
	  fprintf (dump_file,
		   "Growing the dynamic array to %d elements.\n",
		   array_size);
	}
    }

  gcc_assert (!bb_visited_trace (e->dest));
  mark_bb_visited (new_bb, trace);
  new_bb->aux = bb->aux;
  bb->aux = new_bb;

  bbd[new_bb->index].in_trace = trace;

  return new_bb;
}

/* Compute and return the key (for the heap) of the basic block BB.  */

static fibheapkey_t
bb_to_key (basic_block bb)
{
  edge e;
  edge_iterator ei;
  int priority = 0;

  /* Use index as key to align with its original order.  */
  if (optimize_function_for_size_p (cfun))
    return bb->index;

  /* Do not start in probably never executed blocks.  */

  if (BB_PARTITION (bb) == BB_COLD_PARTITION
      || probably_never_executed_bb_p (cfun, bb))
    return BB_FREQ_MAX;

  /* Prefer blocks whose predecessor is an end of some trace
     or whose predecessor edge is EDGE_DFS_BACK.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0)
	  || (e->flags & EDGE_DFS_BACK))
	{
	  int edge_freq = EDGE_FREQUENCY (e);

	  if (edge_freq > priority)
	    priority = edge_freq;
	}
    }

  if (priority)
    /* The block with priority should have significantly lower key.  */
    return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);

  return -bb->frequency;
}
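
/* Note on key ordering (illustrative): fibheap_extract_min returns the
   smallest key, so the negated values above make hot blocks come out first.
   A block with frequency 900 (key -900) precedes one with frequency 100
   (key -100), and any prioritized block gets a key below -100 * BB_FREQ_MAX,
   ahead of every non-prioritized block.  */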

/* Return true when the edge E from basic block BB is better than the temporary
   best edge (details are in function).  The probability of edge E is PROB. The
   frequency of the successor is FREQ.  The current best probability is
   BEST_PROB, the best frequency is BEST_FREQ.
   The edge is considered to be equivalent when PROB does not differ much from
   BEST_PROB; similarly for frequency.  */

static bool
better_edge_p (const_basic_block bb, const_edge e, int prob, int freq,
	       int best_prob, int best_freq, const_edge cur_best_edge)
{
  bool is_better_edge;

  /* The BEST_* values do not have to be best, but can be a bit smaller than
     maximum values.  */
  int diff_prob = best_prob / 10;
  int diff_freq = best_freq / 10;

  /* The smaller one is better to keep the original order.  */
  if (optimize_function_for_size_p (cfun))
    return !cur_best_edge
	   || cur_best_edge->dest->index > e->dest->index;

  if (prob > best_prob + diff_prob)
    /* The edge has higher probability than the temporary best edge.  */
    is_better_edge = true;
  else if (prob < best_prob - diff_prob)
    /* The edge has lower probability than the temporary best edge.  */
    is_better_edge = false;
  else if (freq < best_freq - diff_freq)
    /* The edge and the temporary best edge have almost equivalent
       probabilities.  The higher frequency of a successor now means
       that there is another edge going into that successor.
       This successor has lower frequency so it is better.  */
    is_better_edge = true;
  else if (freq > best_freq + diff_freq)
    /* This successor has higher frequency so it is worse.  */
    is_better_edge = false;
  else if (e->dest->prev_bb == bb)
    /* The edges have equivalent probabilities and the successors
       have equivalent frequencies.  Select the previous successor.  */
    is_better_edge = true;
  else
    is_better_edge = false;

  /* If we are doing hot/cold partitioning, make sure that we always favor
     non-crossing edges over crossing edges.  */

  if (!is_better_edge
      && flag_reorder_blocks_and_partition
      && cur_best_edge
      && (cur_best_edge->flags & EDGE_CROSSING)
      && !(e->flags & EDGE_CROSSING))
    is_better_edge = true;

  return is_better_edge;
}
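
/* Worked example (illustrative): with best_prob = 5000 the band is +/- 500,
   so an edge with prob 4800 counts as "equivalent" and the choice falls
   through to the frequency comparison; only prob > 5500 wins outright on
   probability.  */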

/* Return true when the edge E is better than the temporary best edge
   CUR_BEST_EDGE.  If SRC_INDEX_P is true, the function compares the src bb of
   E and CUR_BEST_EDGE; otherwise it will compare the dest bb.
   BEST_LEN is the trace length of src (or dest) bb in CUR_BEST_EDGE.
   TRACES record the information about traces.
   When optimizing for size, the edge with smaller index is better.
   When optimizing for speed, the edge with bigger probability or longer trace
   is better.  */

static bool
connect_better_edge_p (const_edge e, bool src_index_p, int best_len,
		       const_edge cur_best_edge, struct trace *traces)
{
  int e_index;
  int b_index;
  bool is_better_edge;

  if (!cur_best_edge)
    return true;

  if (optimize_function_for_size_p (cfun))
    {
      e_index = src_index_p ? e->src->index : e->dest->index;
      b_index = src_index_p ? cur_best_edge->src->index
			      : cur_best_edge->dest->index;
      /* The smaller one is better to keep the original order.  */
      return b_index > e_index;
    }

  if (src_index_p)
    {
      e_index = e->src->index;

      if (e->probability > cur_best_edge->probability)
	/* The edge has higher probability than the temporary best edge.  */
	is_better_edge = true;
      else if (e->probability < cur_best_edge->probability)
	/* The edge has lower probability than the temporary best edge.  */
	is_better_edge = false;
      else if (traces[bbd[e_index].end_of_trace].length > best_len)
	/* The edge and the temporary best edge have equivalent probabilities.
	   The edge with longer trace is better.  */
	is_better_edge = true;
      else
	is_better_edge = false;
    }
  else
    {
      e_index = e->dest->index;

      if (e->probability > cur_best_edge->probability)
	/* The edge has higher probability than the temporary best edge.  */
	is_better_edge = true;
      else if (e->probability < cur_best_edge->probability)
	/* The edge has lower probability than the temporary best edge.  */
	is_better_edge = false;
      else if (traces[bbd[e_index].start_of_trace].length > best_len)
	/* The edge and the temporary best edge have equivalent probabilities.
	   The edge with longer trace is better.  */
	is_better_edge = true;
      else
	is_better_edge = false;
    }

  return is_better_edge;
}

/* Connect traces in array TRACES, N_TRACES is the count of traces.  */

static void
connect_traces (int n_traces, struct trace *traces)
{
  int i;
  bool *connected;
  bool two_passes;
  int last_trace;
  int current_pass;
  int current_partition;
  int freq_threshold;
  gcov_type count_threshold;
  bool for_size = optimize_function_for_size_p (cfun);

  freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
  if (max_entry_count < INT_MAX / 1000)
    count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
  else
    count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;

  connected = XCNEWVEC (bool, n_traces);
  last_trace = -1;
  current_pass = 1;
  current_partition = BB_PARTITION (traces[0].first);
  two_passes = false;

  if (flag_reorder_blocks_and_partition)
    for (i = 0; i < n_traces && !two_passes; i++)
      if (BB_PARTITION (traces[0].first)
	  != BB_PARTITION (traces[i].first))
	two_passes = true;

  for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++)
    {
      int t = i;
      int t2;
      edge e, best;
      int best_len;

      if (i >= n_traces)
	{
	  gcc_assert (two_passes && current_pass == 1);
	  i = 0;
	  t = i;
	  current_pass = 2;
	  if (current_partition == BB_HOT_PARTITION)
	    current_partition = BB_COLD_PARTITION;
	  else
	    current_partition = BB_HOT_PARTITION;
	}

      if (connected[t])
	continue;

      if (two_passes
	  && BB_PARTITION (traces[t].first) != current_partition)
	continue;

      connected[t] = true;

      /* Find the predecessor traces.  */
      for (t2 = t; t2 > 0;)
	{
	  edge_iterator ei;
	  best = NULL;
	  best_len = 0;
	  FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
	    {
	      int si = e->src->index;

	      if (e->src != ENTRY_BLOCK_PTR
		  && (e->flags & EDGE_CAN_FALLTHRU)
		  && !(e->flags & EDGE_COMPLEX)
		  && bbd[si].end_of_trace >= 0
		  && !connected[bbd[si].end_of_trace]
		  && (BB_PARTITION (e->src) == current_partition)
		  && connect_better_edge_p (e, true, best_len, best, traces))
		{
		  best = e;
		  best_len = traces[bbd[si].end_of_trace].length;
		}
	    }
	  if (best)
	    {
	      best->src->aux = best->dest;
	      t2 = bbd[best->src->index].end_of_trace;
	      connected[t2] = true;

	      if (dump_file)
		{
		  fprintf (dump_file, "Connection: %d %d\n",
			   best->src->index, best->dest->index);
		}
	    }
	  else
	    break;
	}

      if (last_trace >= 0)
	traces[last_trace].last->aux = traces[t2].first;
      last_trace = t;

      /* Find the successor traces.  */
      while (1)
	{
	  /* Find the continuation of the chain.  */
	  edge_iterator ei;
	  best = NULL;
	  best_len = 0;
	  FOR_EACH_EDGE (e, ei, traces[t].last->succs)
	    {
	      int di = e->dest->index;

	      if (e->dest != EXIT_BLOCK_PTR
		  && (e->flags & EDGE_CAN_FALLTHRU)
		  && !(e->flags & EDGE_COMPLEX)
		  && bbd[di].start_of_trace >= 0
		  && !connected[bbd[di].start_of_trace]
		  && (BB_PARTITION (e->dest) == current_partition)
		  && connect_better_edge_p (e, false, best_len, best, traces))
		{
		  best = e;
		  best_len = traces[bbd[di].start_of_trace].length;
		}
	    }

	  if (for_size)
	    {
	      if (!best)
		/* Stop finding the successor traces.  */
		break;

	      /* It is OK to connect block n with block n + 1 or a block
		 before n.  For others, only connect to the loop header.  */
	      if (best->dest->index > (traces[t].last->index + 1))
		{
		  int count = EDGE_COUNT (best->dest->preds);

		  FOR_EACH_EDGE (e, ei, best->dest->preds)
		    if (e->flags & EDGE_DFS_BACK)
		      count--;

		  /* If dest has multiple predecessors, skip it.  We expect
		     that one predecessor with smaller index connects with it
		     later.  */
		  if (count != 1)
		    break;
		}

	      /* Only connect Trace n with Trace n + 1.  It is conservative
		 to keep the order as close as possible to the original order.
		 It also helps to reduce long jumps.  */
	      if (last_trace != bbd[best->dest->index].start_of_trace - 1)
		break;

	      if (dump_file)
		fprintf (dump_file, "Connection: %d %d\n",
			 best->src->index, best->dest->index);

	      t = bbd[best->dest->index].start_of_trace;
	      traces[last_trace].last->aux = traces[t].first;
	      connected[t] = true;
	      last_trace = t;
	    }
	  else if (best)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Connection: %d %d\n",
			   best->src->index, best->dest->index);
		}
	      t = bbd[best->dest->index].start_of_trace;
	      traces[last_trace].last->aux = traces[t].first;
	      connected[t] = true;
	      last_trace = t;
	    }
	  else
	    {
	      /* Try to connect the traces by duplication of 1 block.  */
	      edge e2;
	      basic_block next_bb = NULL;
	      bool try_copy = false;

	      FOR_EACH_EDGE (e, ei, traces[t].last->succs)
		if (e->dest != EXIT_BLOCK_PTR
		    && (e->flags & EDGE_CAN_FALLTHRU)
		    && !(e->flags & EDGE_COMPLEX)
		    && (!best || e->probability > best->probability))
		  {
		    edge_iterator ei;
		    edge best2 = NULL;
		    int best2_len = 0;

		    /* If the destination is a start of a trace which is only
		       one block long, then no need to search the successor
		       blocks of the trace.  Accept it.  */
		    if (bbd[e->dest->index].start_of_trace >= 0
			&& traces[bbd[e->dest->index].start_of_trace].length
			   == 1)
		      {
			best = e;
			try_copy = true;
			continue;
		      }

		    FOR_EACH_EDGE (e2, ei, e->dest->succs)
		      {
			int di = e2->dest->index;

			if (e2->dest == EXIT_BLOCK_PTR
			    || ((e2->flags & EDGE_CAN_FALLTHRU)
				&& !(e2->flags & EDGE_COMPLEX)
				&& bbd[di].start_of_trace >= 0
				&& !connected[bbd[di].start_of_trace]
				&& BB_PARTITION (e2->dest) == current_partition
				&& EDGE_FREQUENCY (e2) >= freq_threshold
				&& e2->count >= count_threshold
				&& (!best2
				    || e2->probability > best2->probability
				    || (e2->probability == best2->probability
					&& traces[bbd[di].start_of_trace].length
					   > best2_len))))
			  {
			    best = e;
			    best2 = e2;
			    if (e2->dest != EXIT_BLOCK_PTR)
			      best2_len = traces[bbd[di].start_of_trace].length;
			    else
			      best2_len = INT_MAX;
			    next_bb = e2->dest;
			    try_copy = true;
			  }
		      }
		  }

	      if (flag_reorder_blocks_and_partition)
		try_copy = false;

	      /* Copy tiny blocks always; copy larger blocks only when the
		 edge is traversed frequently enough.  */
	      if (try_copy
		  && copy_bb_p (best->dest,
				optimize_edge_for_speed_p (best)
				&& EDGE_FREQUENCY (best) >= freq_threshold
				&& best->count >= count_threshold))
		{
		  basic_block new_bb;

		  if (dump_file)
		    {
		      fprintf (dump_file, "Connection: %d %d ",
			       traces[t].last->index, best->dest->index);
		      if (!next_bb)
			fputc ('\n', dump_file);
		      else if (next_bb == EXIT_BLOCK_PTR)
			fprintf (dump_file, "exit\n");
		      else
			fprintf (dump_file, "%d\n", next_bb->index);
		    }

		  new_bb = copy_bb (best->dest, best, traces[t].last, t);
		  traces[t].last = new_bb;
		  if (next_bb && next_bb != EXIT_BLOCK_PTR)
		    {
		      t = bbd[next_bb->index].start_of_trace;
		      traces[last_trace].last->aux = traces[t].first;
		      connected[t] = true;
		      last_trace = t;
		    }
		  else
		    break;	/* Stop finding the successor traces.  */
		}
	      else
		break;	/* Stop finding the successor traces.  */
	    }
	}
    }

  if (dump_file)
    {
      basic_block bb;

      fprintf (dump_file, "Final order:\n");
      for (bb = traces[0].first; bb; bb = (basic_block) bb->aux)
	fprintf (dump_file, "%d ", bb->index);
      fprintf (dump_file, "\n");
      fflush (dump_file);
    }

  FREE (connected);
}

/* Return true when BB can and should be copied.  CODE_MAY_GROW is true
   when code size is allowed to grow by duplication.  */

static bool
copy_bb_p (const_basic_block bb, int code_may_grow)
{
  int size = 0;
  int max_size = uncond_jump_length;
  rtx insn;

  if (!bb->frequency)
    return false;
  if (EDGE_COUNT (bb->preds) < 2)
    return false;
  if (!can_duplicate_block_p (bb))
    return false;

  /* Avoid duplicating blocks which have many successors (PR/13430).  */
  if (EDGE_COUNT (bb->succs) > 8)
    return false;

  if (code_may_grow && optimize_bb_for_speed_p (bb))
    max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);

  FOR_BB_INSNS (bb, insn)
    {
      if (INSN_P (insn))
	size += get_attr_min_length (insn);
    }

  if (size <= max_size)
    return true;

  if (dump_file)
    {
      fprintf (dump_file,
	       "Block %d can't be copied because its size = %d.\n",
	       bb->index, size);
    }

  return false;
}

/* Return the length of unconditional jump instruction.  */

int
get_uncond_jump_length (void)
{
  rtx label, jump;
  int length;

  label = emit_label_before (gen_label_rtx (), get_insns ());
  jump = emit_jump_insn (gen_jump (label));

  length = get_attr_min_length (jump);

  delete_insn (jump);
  delete_insn (label);
  return length;
}

/* Emit a barrier into the footer of BB.  */

static void
emit_barrier_after_bb (basic_block bb)
{
  rtx barrier = emit_barrier_after (BB_END (bb));
  BB_FOOTER (bb) = unlink_insn_chain (barrier, barrier);
}

/* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
   Duplicate the landing pad and split the edges so that no EH edge
   crosses partitions.  */

static void
fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
{
  eh_landing_pad new_lp;
  basic_block new_bb, last_bb, post_bb;
  rtx new_label, jump, post_label;
  unsigned new_partition;
  edge_iterator ei;
  edge e;

  /* Generate the new landing-pad structure.  */
  new_lp = gen_eh_landing_pad (old_lp->region);
  new_lp->post_landing_pad = old_lp->post_landing_pad;
  new_lp->landing_pad = gen_label_rtx ();
  LABEL_PRESERVE_P (new_lp->landing_pad) = 1;

  /* Put appropriate instructions in new bb.  */
  new_label = emit_label (new_lp->landing_pad);

  expand_dw2_landing_pad_for_region (old_lp->region);

  post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
  post_bb = single_succ (post_bb);
  post_label = block_label (post_bb);
  jump = emit_jump_insn (gen_jump (post_label));
  JUMP_LABEL (jump) = post_label;

  /* Create new basic block to be dest for lp.  */
  last_bb = EXIT_BLOCK_PTR->prev_bb;
  new_bb = create_basic_block (new_label, jump, last_bb);
  new_bb->aux = last_bb->aux;
  last_bb->aux = new_bb;

  emit_barrier_after_bb (new_bb);

  make_edge (new_bb, post_bb, 0);

  /* Make sure new bb is in the other partition.  */
  new_partition = BB_PARTITION (old_bb);
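  /* XOR-ing with (BB_HOT_PARTITION | BB_COLD_PARTITION) below toggles the
     partition bit: hot becomes cold and vice versa.  */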
  new_partition ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
  BB_SET_PARTITION (new_bb, new_partition);

  /* Fix up the edges.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)) != NULL; )
    if (BB_PARTITION (e->src) == new_partition)
      {
	rtx insn = BB_END (e->src);
	rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	gcc_assert (note != NULL);
	gcc_checking_assert (INTVAL (XEXP (note, 0)) == old_lp->index);
	XEXP (note, 0) = GEN_INT (new_lp->index);

	/* Adjust the edge to the new destination.  */
	redirect_edge_succ (e, new_bb);
      }
    else
      ei_next (&ei);
}

/* Find the basic blocks that are rarely executed and need to be moved to
   a separate section of the .o file (to cut down on paging and improve
   cache locality).  Return a vector of all edges that cross.  */

static VEC(edge, heap) *
find_rarely_executed_basic_blocks_and_crossing_edges (void)
{
  VEC(edge, heap) *crossing_edges = NULL;
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* Mark which partition (hot/cold) each basic block belongs in.  */
  FOR_EACH_BB (bb)
    {
      if (probably_never_executed_bb_p (cfun, bb))
	BB_SET_PARTITION (bb, BB_COLD_PARTITION);
      else
	BB_SET_PARTITION (bb, BB_HOT_PARTITION);
    }

  /* The format of .gcc_except_table does not allow landing pads to
     be in a different partition than the throw.  Fix this by either
     moving or duplicating the landing pads.  */
  if (cfun->eh->lp_array)
    {
      unsigned i;
      eh_landing_pad lp;

      FOR_EACH_VEC_ELT (eh_landing_pad, cfun->eh->lp_array, i, lp)
	{
	  bool all_same, all_diff;

	  if (lp == NULL
	      || lp->landing_pad == NULL_RTX
	      || !LABEL_P (lp->landing_pad))
	    continue;

	  all_same = all_diff = true;
	  bb = BLOCK_FOR_INSN (lp->landing_pad);
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      gcc_assert (e->flags & EDGE_EH);
	      if (BB_PARTITION (bb) == BB_PARTITION (e->src))
		all_diff = false;
	      else
		all_same = false;
	    }

	  if (all_same)
	    ;
	  else if (all_diff)
	    {
	      int which = BB_PARTITION (bb);
	      which ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
	      BB_SET_PARTITION (bb, which);
	    }
	  else
	    fix_up_crossing_landing_pad (lp, bb);
	}
    }

  /* Mark every edge that crosses between sections.  */

  FOR_EACH_BB (bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      {
	unsigned int flags = e->flags;

	/* We should never have EDGE_CROSSING set yet.  */
	gcc_checking_assert ((flags & EDGE_CROSSING) == 0);

	if (e->src != ENTRY_BLOCK_PTR
	    && e->dest != EXIT_BLOCK_PTR
	    && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
	  {
	    VEC_safe_push (edge, heap, crossing_edges, e);
	    flags |= EDGE_CROSSING;
	  }

	/* Now that we've split eh edges as appropriate, allow landing pads
	   to be merged with the post-landing pads.  */
	flags &= ~EDGE_PRESERVE;

	e->flags = flags;
      }

  return crossing_edges;
}

/* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru.  */

static void
set_edge_can_fallthru_flag (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  e->flags &= ~EDGE_CAN_FALLTHRU;

	  /* The FALLTHRU edge is also CAN_FALLTHRU edge.  */
	  if (e->flags & EDGE_FALLTHRU)
	    e->flags |= EDGE_CAN_FALLTHRU;
	}

      /* If the BB ends with an invertible condjump all (2) edges are
	 CAN_FALLTHRU edges.  */
      if (EDGE_COUNT (bb->succs) != 2)
	continue;
      if (!any_condjump_p (BB_END (bb)))
	continue;
      if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0))
	continue;
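      /* The invert_jump above was only a probe that proved the jump is
	 invertible; invert it again to restore its original direction.  */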
      invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0);
      EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU;
      EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU;
    }
}

/* If any destination of a crossing edge does not have a label, add label;
   Convert any easy fall-through crossing edges to unconditional jumps.  */

static void
add_labels_and_missing_jumps (VEC(edge, heap) *crossing_edges)
{
  size_t i;
  edge e;

  FOR_EACH_VEC_ELT (edge, crossing_edges, i, e)
    {
      basic_block src = e->src;
      basic_block dest = e->dest;
      rtx label, new_jump;

      if (dest == EXIT_BLOCK_PTR)
	continue;

      /* Make sure dest has a label.  */
      label = block_label (dest);

      /* Nothing to do for non-fallthru edges.  */
      if (src == ENTRY_BLOCK_PTR)
	continue;
      if ((e->flags & EDGE_FALLTHRU) == 0)
	continue;

      /* If the block does not end with a control flow insn, then we
	 can trivially add a jump to the end to fixup the crossing.
	 Otherwise the jump will have to go in a new bb, which will
	 be handled by fix_up_fall_thru_edges function.  */
      if (control_flow_insn_p (BB_END (src)))
	continue;

      /* Make sure there's only one successor.  */
      gcc_assert (single_succ_p (src));

      new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src));
      BB_END (src) = new_jump;
      JUMP_LABEL (new_jump) = label;
      LABEL_NUSES (label) += 1;

      emit_barrier_after_bb (src);

      /* Mark edge as non-fallthru.  */
      e->flags &= ~EDGE_FALLTHRU;
    }
}

/* Find any bb's where the fall-through edge is a crossing edge (note that
   these bb's must also contain a conditional jump or end with a call
   instruction; we've already dealt with fall-through edges for blocks
   that didn't have a conditional jump or didn't end with call instruction
   in the call to add_labels_and_missing_jumps).  Convert the fall-through
   edge to non-crossing edge by inserting a new bb to fall-through into.
   The new bb will contain an unconditional jump (crossing edge) to the
   original fall through destination.  */

static void
fix_up_fall_thru_edges (void)
{
  basic_block cur_bb;
  basic_block new_bb;
  edge succ1;
  edge succ2;
  edge fall_thru;
  edge cond_jump = NULL;
  edge e;
  bool cond_jump_crosses;
  int invert_worked;
  rtx old_jump;
  rtx fall_thru_label;

  FOR_EACH_BB (cur_bb)
    {
      fall_thru = NULL;
      if (EDGE_COUNT (cur_bb->succs) > 0)
	succ1 = EDGE_SUCC (cur_bb, 0);
      else
	succ1 = NULL;

      if (EDGE_COUNT (cur_bb->succs) > 1)
	succ2 = EDGE_SUCC (cur_bb, 1);
      else
	succ2 = NULL;

      /* Find the fall-through edge.  */

      if (succ1
	  && (succ1->flags & EDGE_FALLTHRU))
	{
	  fall_thru = succ1;
	  cond_jump = succ2;
	}
      else if (succ2
	       && (succ2->flags & EDGE_FALLTHRU))
	{
	  fall_thru = succ2;
	  cond_jump = succ1;
	}
      else if (succ1
	       && (block_ends_with_call_p (cur_bb)
		   || can_throw_internal (BB_END (cur_bb))))
	{
	  edge e;
	  edge_iterator ei;

	  /* Find EDGE_CAN_FALLTHRU edge.  */
	  FOR_EACH_EDGE (e, ei, cur_bb->succs)
	    if (e->flags & EDGE_CAN_FALLTHRU)
	      {
		fall_thru = e;
		break;
	      }
	}

      if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR))
	{
	  /* Check to see if the fall-thru edge is a crossing edge.  */

	  if (fall_thru->flags & EDGE_CROSSING)
	    {
	      /* The fall_thru edge crosses; now check the cond jump edge, if
		 it exists.  */

	      cond_jump_crosses = true;
	      invert_worked = 0;
	      old_jump = BB_END (cur_bb);

	      /* Find the jump instruction, if there is one.  */

	      if (cond_jump)
		{
		  if (!(cond_jump->flags & EDGE_CROSSING))
		    cond_jump_crosses = false;

		  /* We know the fall-thru edge crosses; if the cond
		     jump edge does NOT cross, and its destination is the
		     next block in the bb order, invert the jump
		     (i.e. fix it so the fall through does not cross and
		     the cond jump does).  */

		  if (!cond_jump_crosses
		      && cur_bb->aux == cond_jump->dest)
		    {
		      /* Find label in fall_thru block.  We've already added
			 any missing labels, so there must be one.  */

		      fall_thru_label = block_label (fall_thru->dest);

		      if (old_jump && JUMP_P (old_jump) && fall_thru_label)
			invert_worked = invert_jump (old_jump,
						     fall_thru_label, 0);
		      if (invert_worked)
			{
			  fall_thru->flags &= ~EDGE_FALLTHRU;
			  cond_jump->flags |= EDGE_FALLTHRU;
			  update_br_prob_note (cur_bb);
			  e = fall_thru;
			  fall_thru = cond_jump;
			  cond_jump = e;
			  cond_jump->flags |= EDGE_CROSSING;
			  fall_thru->flags &= ~EDGE_CROSSING;
			}
		    }
		}

	      if (cond_jump_crosses || !invert_worked)
		{
		  /* This is the case where both edges out of the basic
		     block are crossing edges.  Here we will fix up the
		     fall through edge.  The jump edge will be taken care
		     of later.  The EDGE_CROSSING flag of fall_thru edge
		     is unset before the call to force_nonfallthru
		     function because if a new basic-block is created
		     this edge remains in the current section boundary
		     while the edge between new_bb and the fall_thru->dest
		     becomes EDGE_CROSSING.  */

		  fall_thru->flags &= ~EDGE_CROSSING;
		  new_bb = force_nonfallthru (fall_thru);

		  if (new_bb)
		    {
		      new_bb->aux = cur_bb->aux;
		      cur_bb->aux = new_bb;

		      /* Make sure new fall-through bb is in same
			 partition as bb it's falling through from.  */

		      BB_COPY_PARTITION (new_bb, cur_bb);
		      single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
		    }
		  else
		    {
		      /* If a new basic-block was not created; restore
			 the EDGE_CROSSING flag.  */
		      fall_thru->flags |= EDGE_CROSSING;
		    }

		  /* Add barrier after new jump.  */
		  emit_barrier_after_bb (new_bb ? new_bb : cur_bb);
		}
	    }
	}
    }
}

/* This function checks the destination block of a "crossing jump" to
   see if it has any crossing predecessors that begin with a code label
   and end with an unconditional jump.  If so, it returns that predecessor
   block.  (This is to avoid creating lots of new basic blocks that all
   contain unconditional jumps to the same destination).  */

static basic_block
find_jump_block (basic_block jump_dest)
{
  basic_block source_bb = NULL;
  edge e;
  rtx insn;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, jump_dest->preds)
    if (e->flags & EDGE_CROSSING)
      {
	basic_block src = e->src;

	/* Check each predecessor to see if it has a label, and contains
	   only one executable instruction, which is an unconditional jump.
	   If so, we can use it.  */

	if (LABEL_P (BB_HEAD (src)))
	  for (insn = BB_HEAD (src);
	       !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
	       insn = NEXT_INSN (insn))
	    {
	      if (INSN_P (insn)
		  && insn == BB_END (src)
		  && JUMP_P (insn)
		  && !any_condjump_p (insn))
		{
		  source_bb = src;
		  break;
		}
	    }

	if (source_bb)
	  break;
      }

  return source_bb;
}

/* Find all BB's with conditional jumps that are crossing edges;
   insert a new bb and make the conditional jump branch to the new
   bb instead (make the new bb same color so conditional branch won't
   be a 'crossing' edge).  Insert an unconditional jump from the
   new bb to the original destination of the conditional jump.  */

static void
fix_crossing_conditional_branches (void)
{
  basic_block cur_bb;
  basic_block new_bb;
  basic_block dest;
  edge succ1;
  edge succ2;
  edge crossing_edge;
  edge new_edge;
  rtx old_jump;
  rtx set_src;
  rtx old_label = NULL_RTX;
  rtx new_label;

  FOR_EACH_BB (cur_bb)
    {
      crossing_edge = NULL;
      if (EDGE_COUNT (cur_bb->succs) > 0)
	succ1 = EDGE_SUCC (cur_bb, 0);
      else
	succ1 = NULL;

      if (EDGE_COUNT (cur_bb->succs) > 1)
	succ2 = EDGE_SUCC (cur_bb, 1);
      else
	succ2 = NULL;

      /* We already took care of fall-through edges, so only one successor
	 can be a crossing edge.  */

      if (succ1 && (succ1->flags & EDGE_CROSSING))
	crossing_edge = succ1;
      else if (succ2 && (succ2->flags & EDGE_CROSSING))
	crossing_edge = succ2;

      if (crossing_edge)
	{
	  old_jump = BB_END (cur_bb);

	  /* Check to make sure the jump instruction is a
	     conditional jump.  */

	  set_src = NULL_RTX;

	  if (any_condjump_p (old_jump))
	    {
	      if (GET_CODE (PATTERN (old_jump)) == SET)
		set_src = SET_SRC (PATTERN (old_jump));
	      else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
		{
		  set_src = XVECEXP (PATTERN (old_jump), 0, 0);
		  if (GET_CODE (set_src) == SET)
		    set_src = SET_SRC (set_src);
		  else
		    set_src = NULL_RTX;
		}
	    }

	  if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
	    {
	      if (GET_CODE (XEXP (set_src, 1)) == PC)
		old_label = XEXP (set_src, 2);
	      else if (GET_CODE (XEXP (set_src, 2)) == PC)
		old_label = XEXP (set_src, 1);

	      /* Check to see if new bb for jumping to that dest has
		 already been created; if so, use it; if not, create
		 a new one.  */

	      new_bb = find_jump_block (crossing_edge->dest);

	      if (new_bb)
		new_label = block_label (new_bb);
	      else
		{
		  basic_block last_bb;
		  rtx new_jump;

		  /* Create new basic block to be dest for
		     conditional jump.  */

		  /* Put appropriate instructions in new bb.  */

		  new_label = gen_label_rtx ();
		  emit_label (new_label);

		  gcc_assert (GET_CODE (old_label) == LABEL_REF);
		  old_label = JUMP_LABEL (old_jump);
		  new_jump = emit_jump_insn (gen_jump (old_label));
		  JUMP_LABEL (new_jump) = old_label;

		  last_bb = EXIT_BLOCK_PTR->prev_bb;
		  new_bb = create_basic_block (new_label, new_jump, last_bb);
		  new_bb->aux = last_bb->aux;
		  last_bb->aux = new_bb;

		  emit_barrier_after_bb (new_bb);

		  /* Make sure new bb is in same partition as source
		     of conditional branch.  */
		  BB_COPY_PARTITION (new_bb, cur_bb);
		}

	      /* Make old jump branch to new bb.  */

	      redirect_jump (old_jump, new_label, 0);

	      /* Remove crossing_edge as predecessor of 'dest'.  */

	      dest = crossing_edge->dest;

	      redirect_edge_succ (crossing_edge, new_bb);

	      /* Make a new edge from new_bb to old dest; new edge
		 will be a successor for new_bb and a predecessor
		 for 'dest'.  */

	      if (EDGE_COUNT (new_bb->succs) == 0)
		new_edge = make_edge (new_bb, dest, 0);
	      else
		new_edge = EDGE_SUCC (new_bb, 0);

	      crossing_edge->flags &= ~EDGE_CROSSING;
	      new_edge->flags |= EDGE_CROSSING;
	    }
	}
    }
}
1971 /* Find any unconditional branches that cross between hot and cold
1972 sections. Convert them into indirect jumps instead. */
1974 static void
1975 fix_crossing_unconditional_branches (void)
1977 basic_block cur_bb;
1978 rtx last_insn;
1979 rtx label;
1980 rtx label_addr;
1981 rtx indirect_jump_sequence;
1982 rtx jump_insn = NULL_RTX;
1983 rtx new_reg;
1984 rtx cur_insn;
1985 edge succ;
1987 FOR_EACH_BB (cur_bb)
1989 last_insn = BB_END (cur_bb);
1991 if (EDGE_COUNT (cur_bb->succs) < 1)
1992 continue;
1994 succ = EDGE_SUCC (cur_bb, 0);
1996 /* Check to see if bb ends in a crossing (unconditional) jump. At
1997 this point, no crossing jumps should be conditional. */
1999 if (JUMP_P (last_insn)
2000 && (succ->flags & EDGE_CROSSING))
2002 rtx label2, table;
2004 gcc_assert (!any_condjump_p (last_insn));
2006 /* Make sure the jump is not already an indirect or table jump. */
2008 if (!computed_jump_p (last_insn)
2009 && !tablejump_p (last_insn, &label2, &table))
2011 /* We have found a "crossing" unconditional branch. Now
2012 we must convert it to an indirect jump. First create
2013 reference of label, as target for jump. */
2015 label = JUMP_LABEL (last_insn);
2016 label_addr = gen_rtx_LABEL_REF (Pmode, label);
2017 LABEL_NUSES (label) += 1;
2019 /* Get a register to use for the indirect jump. */
2021 new_reg = gen_reg_rtx (Pmode);
2023 /* Generate indirect the jump sequence. */
2025 start_sequence ();
2026 emit_move_insn (new_reg, label_addr);
2027 emit_indirect_jump (new_reg);
2028 indirect_jump_sequence = get_insns ();
2029 end_sequence ();
2031 /* Make sure every instruction in the new jump sequence has
2032 its basic block set to be cur_bb. */
2034 for (cur_insn = indirect_jump_sequence; cur_insn;
2035 cur_insn = NEXT_INSN (cur_insn))
2037 if (!BARRIER_P (cur_insn))
2038 BLOCK_FOR_INSN (cur_insn) = cur_bb;
2039 if (JUMP_P (cur_insn))
2040 jump_insn = cur_insn;
2043 /* Insert the new (indirect) jump sequence immediately before
2044 the unconditional jump, then delete the unconditional jump. */
2046 emit_insn_before (indirect_jump_sequence, last_insn);
2047 delete_insn (last_insn);
2049 /* Make BB_END for cur_bb be the jump instruction (NOT the
2050 barrier instruction at the end of the sequence...). */
2052 BB_END (cur_bb) = jump_insn;
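/* An illustration of the conversion above -- a sketch in assumed AT&T
   x86 syntax; the actual sequence comes from emit_move_insn and
   emit_indirect_jump and is target-dependent.  A crossing jump

	jmp	.L_cold_dest

   becomes a label-address load into a fresh pseudo register followed
   by an indirect jump, which can reach any address:

	movl	$.L_cold_dest, %eax
	jmp	*%eax  */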
2058 /* Add REG_CROSSING_JUMP note to all crossing jump insns. */
2060 static void
2061 add_reg_crossing_jump_notes (void)
2063 basic_block bb;
2064 edge e;
2065 edge_iterator ei;
2067 FOR_EACH_BB (bb)
2068 FOR_EACH_EDGE (e, ei, bb->succs)
2069 if ((e->flags & EDGE_CROSSING)
2070 && JUMP_P (BB_END (e->src)))
2071 add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
2074 /* Verify, in the basic block chain, that there is at most one switch
2075 between hot/cold partitions. This is modelled on
2076 rtl_verify_flow_info_1, but it cannot go inside that function
2077 because this condition will not be true until after
2078 reorder_basic_blocks is called. */
2080 static void
2081 verify_hot_cold_block_grouping (void)
2083 basic_block bb;
2084 int err = 0;
2085 bool switched_sections = false;
2086 int current_partition = 0;
2088 FOR_EACH_BB (bb)
2090 if (!current_partition)
2091 current_partition = BB_PARTITION (bb);
2092 if (BB_PARTITION (bb) != current_partition)
2094 if (switched_sections)
2096 error ("multiple hot/cold transitions found (bb %i)",
2097 bb->index);
2098 err = 1;
2100 else
2102 switched_sections = true;
2103 current_partition = BB_PARTITION (bb);
2108 gcc_assert (!err);
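/* For example (illustrative block chains, H = hot, C = cold):

	H H H C C C	- OK: a single hot/cold transition
	H H C C H C	- error: more than one transition

   After reorder_basic_blocks each partition must occupy one contiguous
   run of the basic block chain.  */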
2111 /* Reorder basic blocks. The main entry point to this file. */
2114 static void
2115 reorder_basic_blocks (void)
2117 int n_traces;
2118 int i;
2119 struct trace *traces;
2121 gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);
2123 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
2124 return;
2126 set_edge_can_fallthru_flag ();
2127 mark_dfs_back_edges ();
2129 /* We estimate the length of the uncond jump insn only once, since the code
2130 for getting the insn length now always returns the minimal length. */
2131 if (uncond_jump_length == 0)
2132 uncond_jump_length = get_uncond_jump_length ();
2134 /* We need to know some information for each basic block. */
2135 array_size = GET_ARRAY_SIZE (last_basic_block);
2136 bbd = XNEWVEC (bbro_basic_block_data, array_size);
2137 for (i = 0; i < array_size; i++)
2139 bbd[i].start_of_trace = -1;
2140 bbd[i].end_of_trace = -1;
2141 bbd[i].in_trace = -1;
2142 bbd[i].visited = 0;
2143 bbd[i].heap = NULL;
2144 bbd[i].node = NULL;
2147 traces = XNEWVEC (struct trace, n_basic_blocks);
2148 n_traces = 0;
2149 find_traces (&n_traces, traces);
2150 connect_traces (n_traces, traces);
2151 FREE (traces);
2152 FREE (bbd);
2154 relink_block_chain (/*stay_in_cfglayout_mode=*/true);
2156 if (dump_file)
2158 if (dump_flags & TDF_DETAILS)
2159 dump_reg_info (dump_file);
2160 dump_flow_info (dump_file, dump_flags);
2163 if (flag_reorder_blocks_and_partition)
2164 verify_hot_cold_block_grouping ();
2167 /* Determine which partition the first basic block in the function
2168 belongs to, then find the first basic block in the current function
2169 that belongs to a different section, and insert a
2170 NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
2171 instruction stream. When writing out the assembly code,
2172 encountering this note will make the compiler switch between the
2173 hot and cold text sections. */
2175 static void
2176 insert_section_boundary_note (void)
2178 basic_block bb;
2179 rtx new_note;
2180 int first_partition = 0;
2182 if (!flag_reorder_blocks_and_partition)
2183 return;
2185 FOR_EACH_BB (bb)
2187 if (!first_partition)
2188 first_partition = BB_PARTITION (bb);
2189 if (BB_PARTITION (bb) != first_partition)
2191 new_note = emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS,
2192 BB_HEAD (bb));
2193 /* ??? This kind of note always lives between basic blocks,
2194 but add_insn_before will set BLOCK_FOR_INSN anyway. */
2195 BLOCK_FOR_INSN (new_note) = NULL;
2196 break;
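/* A sketch of the intended effect, assuming a typical ELF target where
   the cold section is named .text.unlikely: when the assembly writer
   reaches the note it switches sections, e.g.

	...hot blocks...
		<NOTE_INSN_SWITCH_TEXT_SECTIONS>
	.section .text.unlikely
	...cold blocks...  */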
2201 static bool
2202 gate_handle_reorder_blocks (void)
2204 if (targetm.cannot_modify_jumps_p ())
2205 return false;
2206 return (optimize > 0
2207 && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
2210 static unsigned int
2211 rest_of_handle_reorder_blocks (void)
2213 basic_block bb;
2215 /* A last attempt to optimize the CFG, since scheduling, peepholing and insn
2216 splitting may have introduced more crossjumping opportunities. */
2217 cfg_layout_initialize (CLEANUP_EXPENSIVE);
2219 reorder_basic_blocks ();
2220 cleanup_cfg (CLEANUP_EXPENSIVE);
2222 FOR_EACH_BB (bb)
2223 if (bb->next_bb != EXIT_BLOCK_PTR)
2224 bb->aux = bb->next_bb;
2225 cfg_layout_finalize ();
2227 /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes. */
2228 insert_section_boundary_note ();
2229 return 0;
2232 struct rtl_opt_pass pass_reorder_blocks =
2235 RTL_PASS,
2236 "bbro", /* name */
2237 gate_handle_reorder_blocks, /* gate */
2238 rest_of_handle_reorder_blocks, /* execute */
2239 NULL, /* sub */
2240 NULL, /* next */
2241 0, /* static_pass_number */
2242 TV_REORDER_BLOCKS, /* tv_id */
2243 0, /* properties_required */
2244 0, /* properties_provided */
2245 0, /* properties_destroyed */
2246 0, /* todo_flags_start */
2247 TODO_verify_rtl_sharing, /* todo_flags_finish */
2251 /* Duplicate the blocks containing computed gotos. This basically unfactors
2252 computed gotos that were factored early on in the compilation process to
2253 speed up edge-based data flow. We used not to unfactor them again,
2254 which could seriously pessimize code with many computed jumps in the
2255 source, such as interpreters. See e.g. PR15242. */
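/* For illustration only -- a sketch, not part of the pass -- of the
   kind of GNU C interpreter loop this is about, in its factored form,
   where every opcode handler branches to one shared dispatch block:  */
#if 0
static int
toy_interpreter (const unsigned char *pc)
{
  /* Dispatch table of label addresses (GNU C computed gotos).  */
  static const void *table[] = { &&op_halt, &&op_inc, &&op_dec };
  int acc = 0;

 dispatch:
  goto *table[*pc++];		/* the single, factored indirect jump */

 op_inc:
  acc++;
  goto dispatch;
 op_dec:
  acc--;
  goto dispatch;
 op_halt:
  return acc;
}
#endif
/* Duplicating the dispatch block into each of its predecessors gives
   every opcode handler its own indirect jump (and thus its own branch
   prediction site), which is what this pass does at the RTL level.  */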
2257 static bool
2258 gate_duplicate_computed_gotos (void)
2260 if (targetm.cannot_modify_jumps_p ())
2261 return false;
2262 return (optimize > 0
2263 && flag_expensive_optimizations
2264 && ! optimize_function_for_size_p (cfun));
2268 static unsigned int
2269 duplicate_computed_gotos (void)
2271 basic_block bb, new_bb;
2272 bitmap candidates;
2273 int max_size;
2275 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
2276 return 0;
2278 clear_bb_flags ();
2279 cfg_layout_initialize (0);
2281 /* We are estimating the length of uncond jump insn only once
2282 since the code for getting the insn length always returns
2283 the minimal length now. */
2284 if (uncond_jump_length == 0)
2285 uncond_jump_length = get_uncond_jump_length ();
2287 max_size
2288 = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
2289 candidates = BITMAP_ALLOC (NULL);
2291 /* Look for blocks that end in a computed jump, and see if such blocks
2292 are suitable for unfactoring. If a block is a candidate for unfactoring,
2293 mark it in the candidates. */
2294 FOR_EACH_BB (bb)
2296 rtx insn;
2297 edge e;
2298 edge_iterator ei;
2299 int size, all_flags;
2301 /* Build the reorder chain for the original order of blocks. */
2302 if (bb->next_bb != EXIT_BLOCK_PTR)
2303 bb->aux = bb->next_bb;
2305 /* Obviously the block has to end in a computed jump. */
2306 if (!computed_jump_p (BB_END (bb)))
2307 continue;
2309 /* Only consider blocks that can be duplicated. */
2310 if (find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX)
2311 || !can_duplicate_block_p (bb))
2312 continue;
2314 /* Make sure that the block is small enough. */
2315 size = 0;
2316 FOR_BB_INSNS (bb, insn)
2317 if (INSN_P (insn))
2319 size += get_attr_min_length (insn);
2320 if (size > max_size)
2321 break;
2323 if (size > max_size)
2324 continue;
2326 /* Final check: there must not be any incoming abnormal edges. */
2327 all_flags = 0;
2328 FOR_EACH_EDGE (e, ei, bb->preds)
2329 all_flags |= e->flags;
2330 if (all_flags & EDGE_COMPLEX)
2331 continue;
2333 bitmap_set_bit (candidates, bb->index);
2336 /* Nothing to do if there are no candidate blocks. */
2337 if (bitmap_empty_p (candidates))
2338 goto done;
2340 /* Duplicate computed gotos. */
2341 FOR_EACH_BB (bb)
2343 if (bb->flags & BB_VISITED)
2344 continue;
2346 bb->flags |= BB_VISITED;
2348 /* BB must have one outgoing edge. That edge must not lead to
2349 the exit block or the next block.
2350 The destination must have more than one predecessor. */
2351 if (!single_succ_p (bb)
2352 || single_succ (bb) == EXIT_BLOCK_PTR
2353 || single_succ (bb) == bb->next_bb
2354 || single_pred_p (single_succ (bb)))
2355 continue;
2357 /* The successor block has to be a duplication candidate. */
2358 if (!bitmap_bit_p (candidates, single_succ (bb)->index))
2359 continue;
2361 new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
2362 new_bb->aux = bb->aux;
2363 bb->aux = new_bb;
2364 new_bb->flags |= BB_VISITED;
2367 done:
2368 cfg_layout_finalize ();
2370 BITMAP_FREE (candidates);
2371 return 0;
2374 struct rtl_opt_pass pass_duplicate_computed_gotos =
2377 RTL_PASS,
2378 "compgotos", /* name */
2379 gate_duplicate_computed_gotos, /* gate */
2380 duplicate_computed_gotos, /* execute */
2381 NULL, /* sub */
2382 NULL, /* next */
2383 0, /* static_pass_number */
2384 TV_REORDER_BLOCKS, /* tv_id */
2385 0, /* properties_required */
2386 0, /* properties_provided */
2387 0, /* properties_destroyed */
2388 0, /* todo_flags_start */
2389 TODO_verify_rtl_sharing, /* todo_flags_finish */
2393 static bool
2394 gate_handle_partition_blocks (void)
2396 /* The optimization to partition hot/cold basic blocks into separate
2397 sections of the .o file does not work well with linkonce or with
2398 user-defined section attributes. Don't call it if either case
2399 arises. */
2400 return (flag_reorder_blocks_and_partition
2401 && optimize
2402 /* See gate_handle_reorder_blocks. We should not partition if
2403 we are going to omit the reordering. */
2404 && optimize_function_for_speed_p (cfun)
2405 && !DECL_ONE_ONLY (current_function_decl)
2406 && !user_defined_section_attribute);
2409 /* This function is the main 'entrance' for the optimization that
2410 partitions hot and cold basic blocks into separate sections of the
2411 .o file (to improve performance and cache locality). Ideally it
2412 would be called after all optimizations that rearrange the CFG have
2413 been called. However part of this optimization may introduce new
2414 register usage, so it must be called before register allocation has
2415 occurred. This means that this optimization is actually called
2416 well before the optimization that reorders basic blocks (see
2417 function above).
2419 This optimization checks the feedback information to determine
2420 which basic blocks are hot/cold, and updates flags on the basic blocks
2421 to indicate which section they belong in. This information is
2422 later used for writing out sections in the .o file. Because hot
2423 and cold sections can be arbitrarily large (within the bounds of
2424 memory), far beyond the size of a single function, it is necessary
2425 to fix up all edges that cross section boundaries, to make sure the
2426 instructions used can actually span the required distance. The
2427 fixes are described below.
2429 Fall-through edges must be changed into jumps; it is not safe or
2430 legal to fall through across a section boundary. Whenever a
2431 fall-through edge crossing a section boundary is encountered, a new
2432 basic block is inserted (in the same section as the fall-through
2433 source), and the fall through edge is redirected to the new basic
2434 block. The new basic block contains an unconditional jump to the
2435 original fall-through target. (If the unconditional jump is
2436 insufficient to cross section boundaries, that is dealt with a
2437 little later, see below).
2439 In order to deal with architectures that have short conditional
2440 branches (which cannot span all of memory) we take any conditional
2441 jump that attempts to cross a section boundary and add a level of
2442 indirection: it becomes a conditional jump to a new basic block, in
2443 the same section. The new basic block contains an unconditional
2444 jump to the original target, in the other section.
2446 For those architectures whose unconditional branch is also
2447 incapable of reaching all of memory, those unconditional jumps are
2448 converted into indirect jumps, through a register.
2450 IMPORTANT NOTE: This optimization causes some messy interactions
2451 with the cfg cleanup optimizations; those optimizations want to
2452 merge blocks wherever possible, and to collapse indirect jump
2453 sequences (change "A jumps to B jumps to C" directly into "A jumps
2454 to C"). Those optimizations can undo the jump fixes that
2455 partitioning is required to make (see above), in order to ensure
2456 that jumps attempting to cross section boundaries are really able
2457 to cover whatever distance the jump requires (on many architectures
2458 conditional or unconditional jumps are not able to reach all of
2459 memory). Therefore tests have to be inserted into each such
2460 optimization to make sure that it does not undo stuff necessary to
2461 cross partition boundaries. This would be much less of a problem
2462 if we could perform this optimization later in the compilation, but
2463 unfortunately the fact that we may need to create indirect jumps
2464 (through registers) requires that this optimization be performed
2465 before register allocation.
2467 Hot and cold basic blocks are partitioned and put in separate
2468 sections of the .o file, to reduce paging and improve cache
2469 performance (hopefully). This can result in bits of code from the
2470 same function being widely separated in the .o file. However this
2471 is not apparent from the current bb structure. Therefore we must take
2472 care to ensure that: 1). There are no fall_thru edges that cross
2473 between sections; 2). For those architectures which have "short"
2474 conditional branches, all conditional branches that attempt to
2475 cross between sections are converted to unconditional branches;
2476 and, 3). For those architectures which have "short" unconditional
2477 branches, all unconditional branches that attempt to cross between
2478 sections are converted to indirect jumps.
2480 The code for fixing up fall_thru edges that cross between hot and
2481 cold basic blocks does so by creating new basic blocks containing
2482 unconditional branches to the appropriate label in the "other"
2483 section. The new basic block is then put in the same (hot or cold)
2484 section as the source of the fall_thru edge, and the fall_thru edge
2485 is modified to fall into the new basic block instead. By adding
2486 this level of indirection we end up with only unconditional branches
2487 crossing between hot and cold sections.
2489 Conditional branches are dealt with by adding a level of indirection.
2490 A new basic block is added in the same (hot/cold) section as the
2491 conditional branch, and the conditional branch is retargeted to the
2492 new basic block. The new basic block contains an unconditional branch
2493 to the original target of the conditional branch (in the other section).
2495 Unconditional branches are dealt with by converting them into
2496 indirect jumps. */
2498 static unsigned
2499 partition_hot_cold_basic_blocks (void)
2501 VEC(edge, heap) *crossing_edges;
2503 if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
2504 return 0;
2506 df_set_flags (DF_DEFER_INSN_RESCAN);
2508 crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
2509 if (crossing_edges == NULL)
2510 return 0;
2512 /* Make sure the source of any crossing edge ends in a jump and the
2513 destination of any crossing edge has a label. */
2514 add_labels_and_missing_jumps (crossing_edges);
2516 /* Convert all crossing fall_thru edges into unconditional jumps
2517 to the original fall-through dest, so that no fall_thru edge
2518 crosses between sections. */
2519 fix_up_fall_thru_edges ();
2521 /* If the architecture does not have conditional branches that can
2522 span all of memory, convert crossing conditional branches into
2523 crossing unconditional branches. */
2524 if (!HAS_LONG_COND_BRANCH)
2525 fix_crossing_conditional_branches ();
2527 /* If the architecture does not have unconditional branches that
2528 can span all of memory, convert crossing unconditional branches
2529 into indirect jumps. Since adding an indirect jump also adds
2530 a new register usage, update the register usage information as
2531 well. */
2532 if (!HAS_LONG_UNCOND_BRANCH)
2533 fix_crossing_unconditional_branches ();
2535 add_reg_crossing_jump_notes ();
2537 /* Clear bb->aux fields that the above routines were using. */
2538 clear_aux_for_blocks ();
2540 VEC_free (edge, heap, crossing_edges);
2542 /* ??? FIXME: DF generates the bb info for a block immediately.
2543 And by immediately, I mean *during* creation of the block.
2545 #0 df_bb_refs_collect
2546 #1 in df_bb_refs_record
2547 #2 in create_basic_block_structure
2549 Which means that the bb_has_eh_pred test in df_bb_refs_collect
2550 will *always* fail, because no edges can have been added to the
2551 block yet. Which of course means we don't add the right
2552 artificial refs, which means we fail df_verify (much) later.
2554 The cleanest solution would seem to be making DF_DEFER_INSN_RESCAN imply
2555 that we also shouldn't grab data from the new blocks that those new
2556 insns are in. In this way one can create the block, link
2557 it up properly, and have everything Just Work later, when deferred
2558 insns are processed.
2560 In the meantime, we have no other option but to throw away all
2561 of the DF data and recompute it all. */
2562 if (cfun->eh->lp_array)
2564 df_finish_pass (true);
2565 df_scan_alloc (NULL);
2566 df_scan_blocks ();
2567 /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
2568 data. We blindly generated all of them when creating the new
2569 landing pad. Delete those assignments we don't use. */
2570 df_set_flags (DF_LR_RUN_DCE);
2571 df_analyze ();
2574 return TODO_verify_flow | TODO_verify_rtl_sharing;
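/* An illustration of the fall-through fixup described in the comment
   above (a sketch; H marks hot blocks, C cold ones).  A crossing
   fall-through

	H1:	...
		(falls through)
	C1:	...

   is redirected by fix_up_fall_thru_edges into a new block in the hot
   section that jumps explicitly:

	H1:	...
		(falls through)
	H2:	jmp	C1
	C1:	...

   so no execution path falls off the end of a section.  */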
2577 struct rtl_opt_pass pass_partition_blocks =
2580 RTL_PASS,
2581 "bbpart", /* name */
2582 gate_handle_partition_blocks, /* gate */
2583 partition_hot_cold_basic_blocks, /* execute */
2584 NULL, /* sub */
2585 NULL, /* next */
2586 0, /* static_pass_number */
2587 TV_REORDER_BLOCKS, /* tv_id */
2588 PROP_cfglayout, /* properties_required */
2589 0, /* properties_provided */
2590 0, /* properties_destroyed */
2591 0, /* todo_flags_start */
2592 0 /* todo_flags_finish */