/* Basic block reordering routines for the GNU compiler.
   Copyright (C) 2000-2015 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
/* This (greedy) algorithm constructs traces in several rounds.
   The construction starts from "seeds".  The seed for the first round
   is the entry point of the function.  When there is more than one seed,
   the one with the lowest key in the heap is selected first (see bb_to_key).
   Then the algorithm repeatedly adds the most probable successor to the end
   of a trace.  Finally it connects the traces.

   There are two parameters: Branch Threshold and Exec Threshold.
   If the probability of an edge to a successor of the current basic block is
   lower than Branch Threshold or its frequency is lower than Exec Threshold,
   then the successor will be the seed in one of the next rounds.
   Each round has these parameters lower than the previous one.
   The last round has to have these parameters set to zero so that the
   remaining blocks are picked up.

   The algorithm selects the most probable successor from all unvisited
   successors and successors that have been added to this trace.
   The other successors (that have not been "sent" to the next round) will be
   other seeds for this round and the secondary traces will start from them.
   If the successor has not been visited in this trace, it is added to the
   trace (however, there is some heuristic for simple branches).
   If the successor has been visited in this trace, a loop has been found.
   If the loop has many iterations, the loop is rotated so that the source
   block of the most probable edge going out of the loop is the last block
   of the trace.
   If the loop has few iterations and there is no edge from the last block of
   the loop going out of the loop, the loop header is duplicated.

   When connecting traces, the algorithm first checks whether there is an edge
   from the last block of a trace to the first block of another trace.
   When there are still some unconnected traces it checks whether there exists
   a basic block BB such that BB is a successor of the last block of a trace
   and BB is a predecessor of the first block of another trace.  In this case,
   BB is duplicated, added at the end of the first trace and the traces are
   connected through it.
   The rest of the traces are simply connected so there will be a jump to the
   beginning of the rest of the traces.

   The above description is for the full algorithm, which is used when the
   function is optimized for speed.  When the function is optimized for size,
   in order to reduce long jumps and connect more fallthru edges, the
   algorithm is modified as follows:
   (1) Break long traces into short ones.  A trace is broken at a block that
   has multiple predecessors/successors during trace discovery.  When
   connecting traces, only connect Trace n with Trace n + 1.  This change
   reduces most long jumps compared with the above algorithm.
   (2) Ignore the edge probability and frequency for fallthru edges.
   (3) Keep the original order of blocks when there is no chance to fall
   through.  We rely on the results of cfg_cleanup.

   To implement the change for code size optimization, the block's index is
   selected as the key and all traces are found in one round.

   References:

   "Software Trace Cache"
   A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999
   http://citeseer.nj.nec.com/15361.html  */
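/* Illustration only, not part of GCC: a minimal sketch of the greedy
   trace-growing step described above, on a toy CFG.  Starting from a seed
   block it repeatedly follows the most probable outgoing edge whose
   probability and frequency clear the current round's thresholds; anything
   below the thresholds would become a seed for a later round in the real
   algorithm.  All names here (toy_block, toy_edge, grow_trace) are
   hypothetical and the block is never compiled.  */
#if 0
struct toy_edge { struct toy_block *dest; int prob; int freq; };
struct toy_block { struct toy_edge succs[4]; int n_succs; int visited; };

static int
grow_trace (struct toy_block *seed, int branch_th, int exec_th,
            struct toy_block **trace, int max_len)
{
  int len = 0;
  struct toy_block *bb = seed;

  while (bb && !bb->visited && len < max_len)
    {
      struct toy_edge *best = 0;
      int i;

      bb->visited = 1;
      trace[len++] = bb;

      /* Pick the most probable successor that is hot enough.  */
      for (i = 0; i < bb->n_succs; i++)
        {
          struct toy_edge *e = &bb->succs[i];
          if (e->prob >= branch_th && e->freq >= exec_th
              && (!best || e->prob > best->prob))
            best = e;
        }
      bb = best ? best->dest : 0;
    }
  return len;
}
#endif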
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "double-int.h"
#include "hard-reg-set.h"
#include "function.h"
#include "statistics.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "toplev.h" /* user_defined_section_attribute */
#include "tree-pass.h"
#include "dominance.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "bb-reorder.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "fibonacci_heap.h"
/* The number of rounds.  In most cases there will only be 4 rounds, but
   when partitioning hot and cold basic blocks into separate sections of
   the object file there will be an extra round.  */
#define N_ROUNDS 5

struct target_bb_reorder default_target_bb_reorder;
#if SWITCHABLE_TARGET
struct target_bb_reorder *this_target_bb_reorder = &default_target_bb_reorder;
#endif

#define uncond_jump_length \
  (this_target_bb_reorder->x_uncond_jump_length)
/* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE.  */
static const int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0};

/* Exec thresholds in thousandths (per mille) of the frequency of bb 0.  */
static const int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0};

/* If edge frequency is lower than DUPLICATION_THRESHOLD per mille of entry
   block the edge destination is not duplicated while connecting traces.  */
#define DUPLICATION_THRESHOLD 100
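/* For intuition (illustrative numbers, not from the sources): with
   REG_BR_PROB_BASE == 10000, round 0 only accepts an edge whose probability
   is at least 10000 * 400 / 1000 == 4000 (i.e. 40%) and whose destination's
   frequency is at least 500/1000 of the hottest entry block; the cutoffs
   fall each round and reach zero in the final rounds so that every
   remaining block is picked up.  The scaling itself is done in find_traces
   below.  */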
typedef fibonacci_heap <long, basic_block_def> bb_heap_t;
typedef fibonacci_node <long, basic_block_def> bb_heap_node_t;
/* Structure to hold needed information for each basic block.  */
typedef struct bbro_basic_block_data_def
{
  /* Which trace is the bb start of (-1 means it is not a start of any).  */
  int start_of_trace;

  /* Which trace is the bb end of (-1 means it is not an end of any).  */
  int end_of_trace;

  /* Which trace is the bb in?  */
  int in_trace;

  /* Which trace was this bb visited in?  */
  int visited;

  /* Which heap is BB in (if any)?  */
  bb_heap_t *heap;

  /* Which heap node is BB in (if any)?  */
  bb_heap_node_t *node;
} bbro_basic_block_data;
/* The current size of the following dynamic array.  */
static int array_size;

/* The array which holds needed information for basic blocks.  */
static bbro_basic_block_data *bbd;

/* To avoid frequent reallocation the size of arrays is greater than needed,
   the number of elements is (not less than) 1.25 * size_wanted.  */
#define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
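/* For example (illustrative): GET_ARRAY_SIZE (100) == 130 and
   GET_ARRAY_SIZE (4) == 10, i.e. at least 25% headroom rounded up in steps
   of 5, so repeated small growths rarely force a reallocation.  */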
/* Free the memory and set the pointer to NULL.  */
#define FREE(P) (gcc_assert (P), free (P), P = 0)
/* Structure for holding information about a trace.  */
struct trace
{
  /* First and last basic block of the trace.  */
  basic_block first, last;

  /* The round of the STC creation which this trace was found in.  */
  int round;

  /* The length (i.e. the number of basic blocks) of the trace.  */
  int length;
};

/* Maximum frequency and count of one of the entry blocks.  */
static int max_entry_frequency;
static gcov_type max_entry_count;
/* Local function prototypes.  */
static void find_traces (int *, struct trace *);
static basic_block rotate_loop (edge, struct trace *, int);
static void mark_bb_visited (basic_block, int);
static void find_traces_1_round (int, int, gcov_type, struct trace *, int *,
                                 int, bb_heap_t **, int);
static basic_block copy_bb (basic_block, edge, basic_block, int);
static long bb_to_key (basic_block);
static bool better_edge_p (const_basic_block, const_edge, int, int, int, int,
                           const_edge);
static bool connect_better_edge_p (const_edge, bool, int, const_edge,
                                   struct trace *);
static void connect_traces (int, struct trace *);
static bool copy_bb_p (const_basic_block, int);
static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type);
/* Return the trace number in which BB was visited.  */

static int
bb_visited_trace (const_basic_block bb)
{
  gcc_assert (bb->index < array_size);
  return bbd[bb->index].visited;
}
/* This function marks BB that it was visited in trace number TRACE.  */

static void
mark_bb_visited (basic_block bb, int trace)
{
  bbd[bb->index].visited = trace;
  if (bbd[bb->index].heap)
    {
      bbd[bb->index].heap->delete_node (bbd[bb->index].node);
      bbd[bb->index].heap = NULL;
      bbd[bb->index].node = NULL;
    }
}
/* Check to see if bb should be pushed into the next round of trace
   collections or not.  Reasons for pushing the block forward are 1).
   If the block is cold, we are doing partitioning, and there will be
   another round (cold partition blocks are not supposed to be
   collected into traces until the very last round); or 2). There will
   be another round, and the basic block is not "hot enough" for the
   current round of trace collection.  */

static bool
push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds,
                      int exec_th, gcov_type count_th)
{
  bool there_exists_another_round;
  bool block_not_hot_enough;

  there_exists_another_round = round < number_of_rounds - 1;

  block_not_hot_enough = (bb->frequency < exec_th
                          || bb->count < count_th
                          || probably_never_executed_bb_p (cfun, bb));

  if (there_exists_another_round
      && block_not_hot_enough)
    return true;
  else
    return false;
}
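/* For instance (illustrative numbers): in round 1 of 5 with EXEC_TH == 200,
   a block of frequency 150 is deferred to a later round, while in the last
   round THERE_EXISTS_ANOTHER_ROUND is false and every remaining block is
   accepted into a trace.  */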
/* Find the traces for Software Trace Cache.  Chain each trace through
   RBI()->next.  Store the number of traces to N_TRACES and description of
   them to TRACES.  */

static void
find_traces (int *n_traces, struct trace *traces)
{
  int i;
  int number_of_rounds;
  edge e;
  edge_iterator ei;
  bb_heap_t *heap = new bb_heap_t (LONG_MIN);

  /* Add one extra round of trace collection when partitioning hot/cold
     basic blocks into separate sections.  The last round is for all the
     cold blocks (and ONLY the cold blocks).  */

  number_of_rounds = N_ROUNDS - 1;

  /* Insert entry points of function into heap.  */
  max_entry_frequency = 0;
  max_entry_count = 0;
  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    {
      bbd[e->dest->index].heap = heap;
      bbd[e->dest->index].node = heap->insert (bb_to_key (e->dest), e->dest);
      if (e->dest->frequency > max_entry_frequency)
        max_entry_frequency = e->dest->frequency;
      if (e->dest->count > max_entry_count)
        max_entry_count = e->dest->count;
    }

  /* Find the traces.  */
  for (i = 0; i < number_of_rounds; i++)
    {
      gcov_type count_threshold;

      if (dump_file)
        fprintf (dump_file, "STC - round %d\n", i + 1);

      if (max_entry_count < INT_MAX / 1000)
        count_threshold = max_entry_count * exec_threshold[i] / 1000;
      else
        count_threshold = max_entry_count / 1000 * exec_threshold[i];

      find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
                           max_entry_frequency * exec_threshold[i] / 1000,
                           count_threshold, traces, n_traces, i, &heap,
                           number_of_rounds);
    }
  delete heap;

  if (dump_file)
    {
      for (i = 0; i < *n_traces; i++)
        {
          basic_block bb;
          fprintf (dump_file, "Trace %d (round %d):  ", i + 1,
                   traces[i].round + 1);
          for (bb = traces[i].first;
               bb != traces[i].last;
               bb = (basic_block) bb->aux)
            fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency);
          fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency);
        }
      fflush (dump_file);
    }
}
/* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE
   (with sequential number TRACE_N).  */

static basic_block
rotate_loop (edge back_edge, struct trace *trace, int trace_n)
{
  basic_block bb;

  /* Information about the best end (end after rotation) of the loop.  */
  basic_block best_bb = NULL;
  edge best_edge = NULL;
  int best_freq = -1;
  gcov_type best_count = -1;
  /* The best edge is preferred when its destination is not visited yet
     or is a start block of some trace.  */
  bool is_preferred = false;

  /* Find the most frequent edge that goes out from current trace.  */
  bb = back_edge->dest;
  do
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
            && bb_visited_trace (e->dest) != trace_n
            && (e->flags & EDGE_CAN_FALLTHRU)
            && !(e->flags & EDGE_COMPLEX))
          {
            if (is_preferred)
              {
                /* The best edge is preferred.  */
                if (!bb_visited_trace (e->dest)
                    || bbd[e->dest->index].start_of_trace >= 0)
                  {
                    /* The current edge E is also preferred.  */
                    int freq = EDGE_FREQUENCY (e);
                    if (freq > best_freq || e->count > best_count)
                      {
                        best_freq = freq;
                        best_count = e->count;
                        best_edge = e;
                        best_bb = bb;
                      }
                  }
              }
            else
              {
                if (!bb_visited_trace (e->dest)
                    || bbd[e->dest->index].start_of_trace >= 0)
                  {
                    /* The current edge E is preferred.  */
                    is_preferred = true;
                    best_freq = EDGE_FREQUENCY (e);
                    best_count = e->count;
                    best_edge = e;
                    best_bb = bb;
                  }
                else
                  {
                    int freq = EDGE_FREQUENCY (e);
                    if (!best_edge || freq > best_freq || e->count > best_count)
                      {
                        best_freq = freq;
                        best_count = e->count;
                        best_edge = e;
                        best_bb = bb;
                      }
                  }
              }
          }
      bb = (basic_block) bb->aux;
    }
  while (bb != back_edge->dest);

  if (best_bb)
    {
      /* Rotate the loop so that the BEST_EDGE goes out from the last block of
         the trace.  */
      if (back_edge->dest == trace->first)
        trace->first = (basic_block) best_bb->aux;
      else
        {
          basic_block prev_bb;

          for (prev_bb = trace->first;
               prev_bb->aux != back_edge->dest;
               prev_bb = (basic_block) prev_bb->aux)
            ;
          prev_bb->aux = best_bb->aux;

          /* Try to get rid of uncond jump to cond jump.  */
          if (single_succ_p (prev_bb))
            {
              basic_block header = single_succ (prev_bb);

              /* Duplicate HEADER if it is a small block containing cond jump
                 in the end.  */
              if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0)
                  && !CROSSING_JUMP_P (BB_END (header)))
                copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n);
            }
        }
    }
  else
    {
      /* We have not found suitable loop tail so do no rotation.  */
      best_bb = back_edge->src;
    }

  return best_bb;
}
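/* An illustrative example of the rotation above (not from the sources):
   suppose a trace ends with the loop H -> A -> B and the back edge is
   B -> H.  If the most frequent edge leaving the loop originates in A, the
   chain is relinked to ... -> B -> H -> A, so A becomes the last block of
   the trace and its exit edge can fall through into the next trace instead
   of needing a jump out of the loop body.  */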
/* One round of finding traces.  Find traces for BRANCH_TH and EXEC_TH i.e. do
   not include basic blocks whose probability is lower than BRANCH_TH or whose
   frequency is lower than EXEC_TH into traces (or whose count is lower than
   COUNT_TH).  Store the new traces into TRACES and modify the number of
   traces *N_TRACES.  Set the round (which the trace belongs to) to ROUND.
   The function expects starting basic blocks to be in *HEAP and will delete
   *HEAP and store starting points for the next round into new *HEAP.  */

static void
find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
                     struct trace *traces, int *n_traces, int round,
                     bb_heap_t **heap, int number_of_rounds)
{
  /* Heap for discarded basic blocks which are possible starting points for
     the next round.  */
  bb_heap_t *new_heap = new bb_heap_t (LONG_MIN);
  bool for_size = optimize_function_for_size_p (cfun);

  while (!(*heap)->empty ())
    {
      basic_block bb;
      struct trace *trace;
      edge best_edge, e;
      long key;
      edge_iterator ei;

      bb = (*heap)->extract_min ();
      bbd[bb->index].heap = NULL;
      bbd[bb->index].node = NULL;

      if (dump_file)
        fprintf (dump_file, "Getting bb %d\n", bb->index);

      /* If the BB's frequency is too low, send BB to the next round.  When
         partitioning hot/cold blocks into separate sections, make sure all
         the cold blocks (and ONLY the cold blocks) go into the (extra) final
         round.  When optimizing for size, do not push to next round.  */

      if (!for_size
          && push_to_next_round_p (bb, round, number_of_rounds, exec_th,
                                   count_th))
        {
          int key = bb_to_key (bb);
          bbd[bb->index].heap = new_heap;
          bbd[bb->index].node = new_heap->insert (key, bb);

          if (dump_file)
            fprintf (dump_file,
                     "  Possible start point of next round: %d (key: %d)\n",
                     bb->index, key);
          continue;
        }

      trace = traces + *n_traces;
      trace->first = bb;
      trace->round = round;
      trace->length = 0;
      bbd[bb->index].in_trace = *n_traces;
      (*n_traces)++;

      do
        {
          int prob, freq;
          bool ends_in_call;

          /* The probability and frequency of the best edge.  */
          int best_prob = INT_MIN / 2;
          int best_freq = INT_MIN / 2;

          best_edge = NULL;
          mark_bb_visited (bb, *n_traces);
          trace->length++;

          if (dump_file)
            fprintf (dump_file, "Basic block %d was visited in trace %d\n",
                     bb->index, *n_traces - 1);

          ends_in_call = block_ends_with_call_p (bb);

          /* Select the successor that will be placed after BB.  */
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              gcc_assert (!(e->flags & EDGE_FAKE));

              if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
                continue;

              if (bb_visited_trace (e->dest)
                  && bb_visited_trace (e->dest) != *n_traces)
                continue;

              if (BB_PARTITION (e->dest) != BB_PARTITION (bb))
                continue;

              prob = e->probability;
              freq = e->dest->frequency;

              /* The only sensible preference for a call instruction is the
                 fallthru edge.  Don't bother selecting anything else.  */
              if (ends_in_call)
                {
                  if (e->flags & EDGE_CAN_FALLTHRU)
                    {
                      best_edge = e;
                      best_prob = prob;
                      best_freq = freq;
                    }
                  continue;
                }

              /* Edge that cannot be fallthru or improbable or infrequent
                 successor (i.e. it is unsuitable successor).  When optimizing
                 for size, ignore the probability and frequency.  */
              if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX)
                  || ((prob < branch_th || EDGE_FREQUENCY (e) < exec_th
                       || e->count < count_th) && (!for_size)))
                continue;

              /* If partitioning hot/cold basic blocks, don't consider edges
                 that cross section boundaries.  */

              if (better_edge_p (bb, e, prob, freq, best_prob, best_freq,
                                 best_edge))
                {
                  best_edge = e;
                  best_prob = prob;
                  best_freq = freq;
                }
            }

          /* If the best destination has multiple predecessors, and can be
             duplicated cheaper than a jump, don't allow it to be added
             to a trace.  We'll duplicate it when connecting traces.  */
          if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2
              && copy_bb_p (best_edge->dest, 0))
            best_edge = NULL;

          /* If the best destination has multiple successors or predecessors,
             don't allow it to be added when optimizing for size.  This makes
             sure predecessors with smaller index are handled before the best
             destination.  It breaks long traces and reduces long jumps.

             Take if-then-else as an example.
                A
               / \
              B   C
               \ /
                D
             If we do not remove the best edge B->D/C->D, the final order might
             be A B D ... C.  C is at the end of the program.  If D's successors
             and D are complicated, might need long jumps for A->C and C->D.
             Similar issue for order: A C D ... B.

             After removing the best edge, the final result will be ABCD/ ACBD.
             It does not add jump compared with the previous order.  But it
             reduces the possibility of long jumps.  */
          if (best_edge && for_size
              && (EDGE_COUNT (best_edge->dest->succs) > 1
                  || EDGE_COUNT (best_edge->dest->preds) > 1))
            best_edge = NULL;

          /* Add all non-selected successors to the heaps.  */
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (e == best_edge
                  || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
                  || bb_visited_trace (e->dest))
                continue;

              key = bb_to_key (e->dest);

              if (bbd[e->dest->index].heap)
                {
                  /* E->DEST is already in some heap.  */
                  if (key != bbd[e->dest->index].node->get_key ())
                    {
                      if (dump_file)
                        fprintf (dump_file,
                                 "Changing key for bb %d from %ld to %ld.\n",
                                 e->dest->index,
                                 (long) bbd[e->dest->index].node->get_key (),
                                 key);
                      bbd[e->dest->index].heap->replace_key
                        (bbd[e->dest->index].node, key);
                    }
                }
              else
                {
                  bb_heap_t *which_heap = *heap;

                  prob = e->probability;
                  freq = EDGE_FREQUENCY (e);

                  if (!(e->flags & EDGE_CAN_FALLTHRU)
                      || (e->flags & EDGE_COMPLEX)
                      || prob < branch_th || freq < exec_th
                      || e->count < count_th)
                    {
                      /* When partitioning hot/cold basic blocks, make sure
                         the cold blocks (and only the cold blocks) all get
                         pushed to the last round of trace collection.  When
                         optimizing for size, do not push to next round.  */

                      if (!for_size && push_to_next_round_p (e->dest, round,
                                                             number_of_rounds,
                                                             exec_th, count_th))
                        which_heap = new_heap;
                    }

                  bbd[e->dest->index].heap = which_heap;
                  bbd[e->dest->index].node = which_heap->insert (key, e->dest);

                  if (dump_file)
                    fprintf (dump_file,
                             "  Possible start of %s round: %d (key: %ld)\n",
                             (which_heap == new_heap) ? "next" : "this",
                             e->dest->index, (long) key);
                }
            }

          if (best_edge) /* Suitable successor was found.  */
            {
              if (bb_visited_trace (best_edge->dest) == *n_traces)
                {
                  /* We do nothing with one basic block loops.  */
                  if (best_edge->dest != bb)
                    {
                      if (EDGE_FREQUENCY (best_edge)
                          > 4 * best_edge->dest->frequency / 5)
                        {
                          /* The loop has at least 4 iterations.  If the loop
                             header is not the first block of the function
                             we can rotate the loop.  */

                          if (best_edge->dest
                              != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
                            {
                              if (dump_file)
                                fprintf (dump_file,
                                         "Rotating loop %d - %d\n",
                                         best_edge->dest->index, bb->index);
                              bb->aux = best_edge->dest;
                              bbd[best_edge->dest->index].in_trace =
                                                             *n_traces - 1;
                              bb = rotate_loop (best_edge, trace, *n_traces);
                            }
                        }
                      else
                        {
                          /* The loop has less than 4 iterations.  */

                          if (single_succ_p (bb)
                              && copy_bb_p (best_edge->dest,
                                            optimize_edge_for_speed_p
                                            (best_edge)))
                            {
                              bb = copy_bb (best_edge->dest, best_edge, bb,
                                            *n_traces);
                              trace->length++;
                            }
                        }
                    }

                  /* Terminate the trace.  */
                  break;
                }
              else
                {
                  /* Check for a situation
                       A
                      /|
                     B |
                      \|
                       C
                     where
                     EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC)
                       >= EDGE_FREQUENCY (AC).
                     (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) )
                     Best ordering is then A B C.

                     When optimizing for size, A B C is always the best order.

                     This situation is created for example by:

                     if (A) B;
                     C;  */

                  FOR_EACH_EDGE (e, ei, bb->succs)
                    if (e != best_edge
                        && (e->flags & EDGE_CAN_FALLTHRU)
                        && !(e->flags & EDGE_COMPLEX)
                        && !bb_visited_trace (e->dest)
                        && single_pred_p (e->dest)
                        && !(e->flags & EDGE_CROSSING)
                        && single_succ_p (e->dest)
                        && (single_succ_edge (e->dest)->flags
                            & EDGE_CAN_FALLTHRU)
                        && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX)
                        && single_succ (e->dest) == best_edge->dest
                        && (2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)
                            || for_size))
                      {
                        best_edge = e;
                        if (dump_file)
                          fprintf (dump_file, "Selecting BB %d\n",
                                   best_edge->dest->index);
                        break;
                      }

                  bb->aux = best_edge->dest;
                  bbd[best_edge->dest->index].in_trace = (*n_traces) - 1;
                  bb = best_edge->dest;
                }
            }
        }
      while (best_edge);
      trace->last = bb;
      bbd[trace->first->index].start_of_trace = *n_traces - 1;
      bbd[trace->last->index].end_of_trace = *n_traces - 1;

      /* The trace is terminated so we have to recount the keys in heap
         (some block can have a lower key because now one of its predecessors
         is an end of the trace).  */
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || bb_visited_trace (e->dest))
            continue;

          if (bbd[e->dest->index].heap)
            {
              key = bb_to_key (e->dest);
              if (key != bbd[e->dest->index].node->get_key ())
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Changing key for bb %d from %ld to %ld.\n",
                             e->dest->index,
                             (long) bbd[e->dest->index].node->get_key (), key);
                  bbd[e->dest->index].heap->replace_key
                    (bbd[e->dest->index].node, key);
                }
            }
        }
    }

  delete (*heap);

  /* "Return" the new heap.  */
  *heap = new_heap;
}
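/* The heap discipline above, summarized (descriptive only): every unplaced
   block lives in at most one of *HEAP (seeds for this round) or NEW_HEAP
   (seeds deferred to the next round), keyed by bb_to_key; extract_min pops
   the most attractive seed, and replace_key re-ranks a block in place when
   a freshly terminated trace ends in one of its predecessors and its key
   therefore drops.  */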
/* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add
   it to trace after BB, mark OLD_BB visited and update pass' data structures
   (TRACE is a number of trace which OLD_BB is duplicated to).  */

static basic_block
copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
{
  basic_block new_bb;

  new_bb = duplicate_block (old_bb, e, bb);
  BB_COPY_PARTITION (new_bb, old_bb);

  gcc_assert (e->dest == new_bb);

  if (dump_file)
    fprintf (dump_file,
             "Duplicated bb %d (created bb %d)\n",
             old_bb->index, new_bb->index);

  if (new_bb->index >= array_size
      || last_basic_block_for_fn (cfun) > array_size)
    {
      int i;
      int new_size;

      new_size = MAX (last_basic_block_for_fn (cfun), new_bb->index + 1);
      new_size = GET_ARRAY_SIZE (new_size);
      bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size);
      for (i = array_size; i < new_size; i++)
        {
          bbd[i].start_of_trace = -1;
          bbd[i].end_of_trace = -1;
          bbd[i].in_trace = -1;
          bbd[i].visited = 0;
          bbd[i].heap = NULL;
          bbd[i].node = NULL;
        }
      array_size = new_size;

      if (dump_file)
        fprintf (dump_file,
                 "Growing the dynamic array to %d elements.\n",
                 array_size);
    }

  gcc_assert (!bb_visited_trace (e->dest));
  mark_bb_visited (new_bb, trace);
  new_bb->aux = bb->aux;
  bb->aux = new_bb;

  bbd[new_bb->index].in_trace = trace;

  return new_bb;
}
/* Compute and return the key (for the heap) of the basic block BB.  */

static long
bb_to_key (basic_block bb)
{
  edge e;
  edge_iterator ei;
  int priority = 0;

  /* Use index as key to align with its original order.  */
  if (optimize_function_for_size_p (cfun))
    return bb->index;

  /* Do not start in probably never executed blocks.  */

  if (BB_PARTITION (bb) == BB_COLD_PARTITION
      || probably_never_executed_bb_p (cfun, bb))
    return BB_FREQ_MAX;

  /* Prefer blocks whose predecessor is an end of some trace
     or whose predecessor edge is EDGE_DFS_BACK.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if ((e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
           && bbd[e->src->index].end_of_trace >= 0)
          || (e->flags & EDGE_DFS_BACK))
        {
          int edge_freq = EDGE_FREQUENCY (e);

          if (edge_freq > priority)
            priority = edge_freq;
        }
    }

  /* The block with priority should have significantly lower key.  */
  if (priority)
    return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency);

  return -bb->frequency;
}
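/* Key ordering example (illustrative numbers): the heaps are min-heaps over
   these keys, so a cold block keyed BB_FREQ_MAX is extracted last, an
   ordinary block of frequency 900 is keyed -900, and a block fed by a trace
   end through an edge of frequency 300 is keyed
   -(100 * BB_FREQ_MAX + 100 * 300 + 900), making it by far the most
   attractive seed.  */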
/* Return true when the edge E from basic block BB is better than the temporary
   best edge (details are in function).  The probability of edge E is PROB. The
   frequency of the successor is FREQ.  The current best probability is
   BEST_PROB, the best frequency is BEST_FREQ.
   The edge is considered to be equivalent when PROB does not differ much from
   BEST_PROB; similarly for frequency.  */

static bool
better_edge_p (const_basic_block bb, const_edge e, int prob, int freq,
               int best_prob, int best_freq, const_edge cur_best_edge)
{
  bool is_better_edge;

  /* The BEST_* values do not have to be best, but can be a bit smaller than
     maximum values.  */
  int diff_prob = best_prob / 10;
  int diff_freq = best_freq / 10;

  /* The smaller one is better to keep the original order.  */
  if (optimize_function_for_size_p (cfun))
    return !cur_best_edge
           || cur_best_edge->dest->index > e->dest->index;

  if (prob > best_prob + diff_prob)
    /* The edge has higher probability than the temporary best edge.  */
    is_better_edge = true;
  else if (prob < best_prob - diff_prob)
    /* The edge has lower probability than the temporary best edge.  */
    is_better_edge = false;
  else if (freq < best_freq - diff_freq)
    /* The edge and the temporary best edge have almost equivalent
       probabilities.  The higher frequency of a successor now means
       that there is another edge going into that successor.
       This successor has lower frequency so it is better.  */
    is_better_edge = true;
  else if (freq > best_freq + diff_freq)
    /* This successor has higher frequency so it is worse.  */
    is_better_edge = false;
  else if (e->dest->prev_bb == bb)
    /* The edges have equivalent probabilities and the successors
       have equivalent frequencies.  Select the previous successor.  */
    is_better_edge = true;
  else
    is_better_edge = false;

  /* If we are doing hot/cold partitioning, make sure that we always favor
     non-crossing edges over crossing edges.  */

  if (!is_better_edge
      && flag_reorder_blocks_and_partition
      && cur_best_edge
      && (cur_best_edge->flags & EDGE_CROSSING)
      && !(e->flags & EDGE_CROSSING))
    is_better_edge = true;

  return is_better_edge;
}
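/* Worked example (illustrative numbers): with BEST_PROB == 5000 the
   tolerance DIFF_PROB is 500, so a new edge with PROB == 5300 is treated as
   equivalent in probability and the decision falls through to the
   successor-frequency tests; only PROB > 5500 would win on probability
   alone.  */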
/* Return true when the edge E is better than the temporary best edge
   CUR_BEST_EDGE.  If SRC_INDEX_P is true, the function compares the src bb of
   E and CUR_BEST_EDGE; otherwise it will compare the dest bb.
   BEST_LEN is the trace length of src (or dest) bb in CUR_BEST_EDGE.
   TRACES record the information about traces.
   When optimizing for size, the edge with smaller index is better.
   When optimizing for speed, the edge with bigger probability or longer trace
   is better.  */

static bool
connect_better_edge_p (const_edge e, bool src_index_p, int best_len,
                       const_edge cur_best_edge, struct trace *traces)
{
  int e_index;
  int b_index;
  bool is_better_edge;

  if (!cur_best_edge)
    return true;

  if (optimize_function_for_size_p (cfun))
    {
      e_index = src_index_p ? e->src->index : e->dest->index;
      b_index = src_index_p ? cur_best_edge->src->index
                            : cur_best_edge->dest->index;
      /* The smaller one is better to keep the original order.  */
      return b_index > e_index;
    }

  if (src_index_p)
    {
      e_index = e->src->index;

      if (e->probability > cur_best_edge->probability)
        /* The edge has higher probability than the temporary best edge.  */
        is_better_edge = true;
      else if (e->probability < cur_best_edge->probability)
        /* The edge has lower probability than the temporary best edge.  */
        is_better_edge = false;
      else if (traces[bbd[e_index].end_of_trace].length > best_len)
        /* The edge and the temporary best edge have equivalent probabilities.
           The edge with longer trace is better.  */
        is_better_edge = true;
      else
        is_better_edge = false;
    }
  else
    {
      e_index = e->dest->index;

      if (e->probability > cur_best_edge->probability)
        /* The edge has higher probability than the temporary best edge.  */
        is_better_edge = true;
      else if (e->probability < cur_best_edge->probability)
        /* The edge has lower probability than the temporary best edge.  */
        is_better_edge = false;
      else if (traces[bbd[e_index].start_of_trace].length > best_len)
        /* The edge and the temporary best edge have equivalent probabilities.
           The edge with longer trace is better.  */
        is_better_edge = true;
      else
        is_better_edge = false;
    }

  return is_better_edge;
}
/* Connect traces in array TRACES, N_TRACES is the count of traces.  */

static void
connect_traces (int n_traces, struct trace *traces)
{
  int i;
  bool *connected;
  bool two_passes;
  int last_trace;
  int current_pass;
  int current_partition;
  int freq_threshold;
  gcov_type count_threshold;
  bool for_size = optimize_function_for_size_p (cfun);

  freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000;
  if (max_entry_count < INT_MAX / 1000)
    count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000;
  else
    count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD;

  connected = XCNEWVEC (bool, n_traces);
  last_trace = -1;
  current_pass = 1;
  current_partition = BB_PARTITION (traces[0].first);
  two_passes = false;

  if (crtl->has_bb_partition)
    for (i = 0; i < n_traces && !two_passes; i++)
      if (BB_PARTITION (traces[0].first)
          != BB_PARTITION (traces[i].first))
        two_passes = true;

  for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++)
    {
      int t = i;
      int t2;
      edge e, best;
      int best_len;

      if (i >= n_traces)
        {
          gcc_assert (two_passes && current_pass == 1);
          i = 0;
          t = i;
          current_pass = 2;
          if (current_partition == BB_HOT_PARTITION)
            current_partition = BB_COLD_PARTITION;
          else
            current_partition = BB_HOT_PARTITION;
        }

      if (connected[t])
        continue;

      if (two_passes
          && BB_PARTITION (traces[t].first) != current_partition)
        continue;

      connected[t] = true;

      /* Find the predecessor traces.  */
      for (t2 = t; t2 > 0;)
        {
          edge_iterator ei;
          best = NULL;
          best_len = 0;
          FOR_EACH_EDGE (e, ei, traces[t2].first->preds)
            {
              int si = e->src->index;

              if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
                  && (e->flags & EDGE_CAN_FALLTHRU)
                  && !(e->flags & EDGE_COMPLEX)
                  && bbd[si].end_of_trace >= 0
                  && !connected[bbd[si].end_of_trace]
                  && (BB_PARTITION (e->src) == current_partition)
                  && connect_better_edge_p (e, true, best_len, best, traces))
                {
                  best = e;
                  best_len = traces[bbd[si].end_of_trace].length;
                }
            }
          if (best)
            {
              best->src->aux = best->dest;
              t2 = bbd[best->src->index].end_of_trace;
              connected[t2] = true;

              if (dump_file)
                fprintf (dump_file, "Connection: %d %d\n",
                         best->src->index, best->dest->index);
            }
          else
            break;
        }

      if (last_trace >= 0)
        traces[last_trace].last->aux = traces[t2].first;
      last_trace = t;

      /* Find the successor traces.  */
      while (1)
        {
          /* Find the continuation of the chain.  */
          edge_iterator ei;
          best = NULL;
          best_len = 0;
          FOR_EACH_EDGE (e, ei, traces[t].last->succs)
            {
              int di = e->dest->index;

              if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
                  && (e->flags & EDGE_CAN_FALLTHRU)
                  && !(e->flags & EDGE_COMPLEX)
                  && bbd[di].start_of_trace >= 0
                  && !connected[bbd[di].start_of_trace]
                  && (BB_PARTITION (e->dest) == current_partition)
                  && connect_better_edge_p (e, false, best_len, best, traces))
                {
                  best = e;
                  best_len = traces[bbd[di].start_of_trace].length;
                }
            }

          if (for_size)
            {
              if (!best)
                /* Stop finding the successor traces.  */
                break;

              /* It is OK to connect block n with block n + 1 or a block
                 before n.  For others, only connect to the loop header.  */
              if (best->dest->index > (traces[t].last->index + 1))
                {
                  int count = EDGE_COUNT (best->dest->preds);

                  FOR_EACH_EDGE (e, ei, best->dest->preds)
                    if (e->flags & EDGE_DFS_BACK)
                      count--;

                  /* If dest has multiple predecessors, skip it.  We expect
                     that one predecessor with smaller index connects with it
                     later.  */
                  if (count != 1)
                    break;
                }

              /* Only connect Trace n with Trace n + 1.  It is conservative
                 to keep the order as close as possible to the original order.
                 It also helps to reduce long jumps.  */
              if (last_trace != bbd[best->dest->index].start_of_trace - 1)
                break;

              if (dump_file)
                fprintf (dump_file, "Connection: %d %d\n",
                         best->src->index, best->dest->index);

              t = bbd[best->dest->index].start_of_trace;
              traces[last_trace].last->aux = traces[t].first;
              connected[t] = true;
              last_trace = t;
            }
          else if (best)
            {
              if (dump_file)
                fprintf (dump_file, "Connection: %d %d\n",
                         best->src->index, best->dest->index);

              t = bbd[best->dest->index].start_of_trace;
              traces[last_trace].last->aux = traces[t].first;
              connected[t] = true;
              last_trace = t;
            }
          else
            {
              /* Try to connect the traces by duplication of 1 block.  */
              edge e2;
              basic_block next_bb = NULL;
              bool try_copy = false;

              FOR_EACH_EDGE (e, ei, traces[t].last->succs)
                if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
                    && (e->flags & EDGE_CAN_FALLTHRU)
                    && !(e->flags & EDGE_COMPLEX)
                    && (!best || e->probability > best->probability))
                  {
                    edge_iterator ei;
                    edge best2 = NULL;
                    int best2_len = 0;

                    /* If the destination is a start of a trace which is only
                       one block long, then no need to search the successor
                       blocks of the trace.  Accept it.  */
                    if (bbd[e->dest->index].start_of_trace >= 0
                        && traces[bbd[e->dest->index].start_of_trace].length
                           == 1)
                      {
                        best = e;
                        try_copy = true;
                        continue;
                      }

                    FOR_EACH_EDGE (e2, ei, e->dest->succs)
                      {
                        int di = e2->dest->index;

                        if (e2->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
                            || ((e2->flags & EDGE_CAN_FALLTHRU)
                                && !(e2->flags & EDGE_COMPLEX)
                                && bbd[di].start_of_trace >= 0
                                && !connected[bbd[di].start_of_trace]
                                && BB_PARTITION (e2->dest) == current_partition
                                && EDGE_FREQUENCY (e2) >= freq_threshold
                                && e2->count >= count_threshold
                                && (!best2
                                    || e2->probability > best2->probability
                                    || (e2->probability == best2->probability
                                        && traces[bbd[di].start_of_trace].length
                                           > best2_len))))
                          {
                            best = e;
                            best2 = e2;
                            if (e2->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
                              best2_len = traces[bbd[di].start_of_trace].length;
                            else
                              best2_len = INT_MAX;
                            next_bb = e2->dest;
                            try_copy = true;
                          }
                      }
                  }

              if (crtl->has_bb_partition)
                try_copy = false;

              /* Copy tiny blocks always; copy larger blocks only when the
                 edge is traversed frequently enough.  */
              if (try_copy
                  && copy_bb_p (best->dest,
                                optimize_edge_for_speed_p (best)
                                && EDGE_FREQUENCY (best) >= freq_threshold
                                && best->count >= count_threshold))
                {
                  basic_block new_bb;

                  if (dump_file)
                    {
                      fprintf (dump_file, "Connection: %d %d ",
                               traces[t].last->index, best->dest->index);
                      if (!next_bb)
                        fputc ('\n', dump_file);
                      else if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
                        fprintf (dump_file, "exit\n");
                      else
                        fprintf (dump_file, "%d\n", next_bb->index);
                    }

                  new_bb = copy_bb (best->dest, best, traces[t].last, t);
                  traces[t].last = new_bb;
                  if (next_bb && next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
                    {
                      t = bbd[next_bb->index].start_of_trace;
                      traces[last_trace].last->aux = traces[t].first;
                      connected[t] = true;
                      last_trace = t;
                    }
                  else
                    break;  /* Stop finding the successor traces.  */
                }
              else
                break;  /* Stop finding the successor traces.  */
            }
        }
    }

  if (dump_file)
    {
      basic_block bb;

      fprintf (dump_file, "Final order:\n");
      for (bb = traces[0].first; bb; bb = (basic_block) bb->aux)
        fprintf (dump_file, "%d ", bb->index);
      fprintf (dump_file, "\n");
      fflush (dump_file);
    }

  FREE (connected);
}
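/* A small illustration of the duplication case above (not from the
   sources): if trace 1 ends in block A, trace 2 starts in block C, and some
   block B satisfies A -> B -> C, then B is duplicated via copy_bb so that
   trace 1 becomes ... A B' and falls through into C, trading one copied
   block for the unconditional jump that would otherwise be needed.  */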
/* Return true when BB can and should be copied.  CODE_MAY_GROW is true
   when code size is allowed to grow by duplication.  */

static bool
copy_bb_p (const_basic_block bb, int code_may_grow)
{
  int size = 0;
  int max_size = uncond_jump_length;
  rtx_insn *insn;

  if (!bb->frequency)
    return false;
  if (EDGE_COUNT (bb->preds) < 2)
    return false;
  if (!can_duplicate_block_p (bb))
    return false;

  /* Avoid duplicating blocks which have many successors (PR/13430).  */
  if (EDGE_COUNT (bb->succs) > 8)
    return false;

  if (code_may_grow && optimize_bb_for_speed_p (bb))
    max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);

  FOR_BB_INSNS (bb, insn)
    {
      if (INSN_P (insn))
        size += get_attr_min_length (insn);
    }

  if (size <= max_size)
    return true;

  if (dump_file)
    fprintf (dump_file,
             "Block %d can't be copied because its size = %d.\n",
             bb->index, size);

  return false;
}
/* Return the length of unconditional jump instruction.  */

int
get_uncond_jump_length (void)
{
  rtx_insn *label, *jump;
  int length;

  start_sequence ();
  label = emit_label (gen_label_rtx ());
  jump = emit_jump_insn (gen_jump (label));
  length = get_attr_min_length (jump);
  end_sequence ();

  return length;
}
/* The landing pad OLD_LP, in block OLD_BB, has edges from both partitions.
   Duplicate the landing pad and split the edges so that no EH edge
   crosses partitions.  */

static void
fix_up_crossing_landing_pad (eh_landing_pad old_lp, basic_block old_bb)
{
  eh_landing_pad new_lp;
  basic_block new_bb, last_bb, post_bb;
  rtx_insn *new_label, *jump;
  rtx post_label;
  unsigned new_partition;
  edge_iterator ei;
  edge e;

  /* Generate the new landing-pad structure.  */
  new_lp = gen_eh_landing_pad (old_lp->region);
  new_lp->post_landing_pad = old_lp->post_landing_pad;
  new_lp->landing_pad = gen_label_rtx ();
  LABEL_PRESERVE_P (new_lp->landing_pad) = 1;

  /* Put appropriate instructions in new bb.  */
  new_label = emit_label (new_lp->landing_pad);

  expand_dw2_landing_pad_for_region (old_lp->region);

  post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
  post_bb = single_succ (post_bb);
  post_label = block_label (post_bb);
  jump = emit_jump_insn (gen_jump (post_label));
  JUMP_LABEL (jump) = post_label;

  /* Create new basic block to be dest for lp.  */
  last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
  new_bb = create_basic_block (new_label, jump, last_bb);
  new_bb->aux = last_bb->aux;
  last_bb->aux = new_bb;

  emit_barrier_after_bb (new_bb);

  make_edge (new_bb, post_bb, 0);

  /* Make sure new bb is in the other partition.  */
  new_partition = BB_PARTITION (old_bb);
  new_partition ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
  BB_SET_PARTITION (new_bb, new_partition);

  /* Fix up the edges.  */
  for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)) != NULL; )
    if (BB_PARTITION (e->src) == new_partition)
      {
        rtx_insn *insn = BB_END (e->src);
        rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

        gcc_assert (note != NULL);
        gcc_checking_assert (INTVAL (XEXP (note, 0)) == old_lp->index);
        XEXP (note, 0) = GEN_INT (new_lp->index);

        /* Adjust the edge to the new destination.  */
        redirect_edge_succ (e, new_bb);
      }
    else
      ei_next (&ei);
}
/* Ensure that all hot bbs are included in a hot path through the
   procedure.  This is done by calling this function twice, once
   with WALK_UP true (to look for paths from the entry to hot bbs) and
   once with WALK_UP false (to look for paths from hot bbs to the exit).
   Returns the updated value of COLD_BB_COUNT and adds newly-hot bbs
   to BBS_IN_HOT_PARTITION.  */

static unsigned int
sanitize_hot_paths (bool walk_up, unsigned int cold_bb_count,
                    vec<basic_block> *bbs_in_hot_partition)
{
  /* Callers check this.  */
  gcc_checking_assert (cold_bb_count);

  /* Keep examining hot bbs while we still have some left to check
     and there are remaining cold bbs.  */
  vec<basic_block> hot_bbs_to_check = bbs_in_hot_partition->copy ();
  while (! hot_bbs_to_check.is_empty ()
         && cold_bb_count)
    {
      basic_block bb = hot_bbs_to_check.pop ();
      vec<edge, va_gc> *edges = walk_up ? bb->preds : bb->succs;
      edge e;
      edge_iterator ei;
      int highest_probability = 0;
      int highest_freq = 0;
      gcov_type highest_count = 0;
      bool found = false;

      /* Walk the preds/succs and check if there is at least one already
         marked hot.  Keep track of the most frequent pred/succ so that we
         can mark it hot if we don't find one.  */
      FOR_EACH_EDGE (e, ei, edges)
        {
          basic_block reach_bb = walk_up ? e->src : e->dest;

          if (e->flags & EDGE_DFS_BACK)
            continue;

          if (BB_PARTITION (reach_bb) != BB_COLD_PARTITION)
            {
              found = true;
              break;
            }
          /* The following loop will look for the hottest edge via
             the edge count, if it is non-zero, then fallback to the edge
             frequency and finally the edge probability.  */
          if (e->count > highest_count)
            highest_count = e->count;
          int edge_freq = EDGE_FREQUENCY (e);
          if (edge_freq > highest_freq)
            highest_freq = edge_freq;
          if (e->probability > highest_probability)
            highest_probability = e->probability;
        }

      /* If bb is reached by (or reaches, in the case of !WALK_UP) another hot
         block (or unpartitioned, e.g. the entry block) then it is ok.  If not,
         then the most frequent pred (or succ) needs to be adjusted.  In the
         case where multiple preds/succs have the same frequency (e.g. a
         50-50 branch), then both will be adjusted.  */
      if (found)
        continue;

      FOR_EACH_EDGE (e, ei, edges)
        {
          if (e->flags & EDGE_DFS_BACK)
            continue;
          /* Select the hottest edge using the edge count, if it is non-zero,
             then fallback to the edge frequency and finally the edge
             probability.  */
          if (highest_count)
            {
              if (e->count < highest_count)
                continue;
            }
          else if (highest_freq)
            {
              if (EDGE_FREQUENCY (e) < highest_freq)
                continue;
            }
          else if (e->probability < highest_probability)
            continue;

          basic_block reach_bb = walk_up ? e->src : e->dest;

          /* We have a hot bb with an immediate dominator that is cold.
             The dominator needs to be re-marked hot.  */
          BB_SET_PARTITION (reach_bb, BB_HOT_PARTITION);
          cold_bb_count--;

          /* Now we need to examine newly-hot reach_bb to see if it is also
             dominated by a cold bb.  */
          bbs_in_hot_partition->safe_push (reach_bb);
          hot_bbs_to_check.safe_push (reach_bb);
        }
    }

  return cold_bb_count;
}
/* Find the basic blocks that are rarely executed and need to be moved to
   a separate section of the .o file (to cut down on paging and improve
   cache locality).  Return a vector of all edges that cross.  */

static vec<edge>
find_rarely_executed_basic_blocks_and_crossing_edges (void)
{
  vec<edge> crossing_edges = vNULL;
  basic_block bb;
  edge e;
  edge_iterator ei;
  unsigned int cold_bb_count = 0;
  auto_vec<basic_block> bbs_in_hot_partition;

  /* Mark which partition (hot/cold) each basic block belongs in.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bool cold_bb = false;

      if (probably_never_executed_bb_p (cfun, bb))
        {
          /* Handle profile insanities created by upstream optimizations
             by also checking the incoming edge weights.  If there is a
             non-cold incoming edge, conservatively prevent this block from
             being split into the cold section.  */
          cold_bb = true;
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!probably_never_executed_edge_p (cfun, e))
              {
                cold_bb = false;
                break;
              }
        }
      if (cold_bb)
        {
          BB_SET_PARTITION (bb, BB_COLD_PARTITION);
          cold_bb_count++;
        }
      else
        {
          BB_SET_PARTITION (bb, BB_HOT_PARTITION);
          bbs_in_hot_partition.safe_push (bb);
        }
    }

  /* Ensure that hot bbs are included along a hot path from the entry to exit.
     Several different possibilities may include cold bbs along all paths
     to/from a hot bb.  One is that there are edge weight insanities
     due to optimization phases that do not properly update basic block profile
     counts.  The second is that the entry of the function may not be hot,
     because it is entered fewer times than the number of profile training
     runs, but there is a loop inside the function that causes blocks within
     the function to be above the threshold for hotness.  This is fixed by
     walking up from hot bbs to the entry block, and then down from hot bbs
     to the exit, performing partitioning fixups as necessary.  */
  if (cold_bb_count)
    {
      mark_dfs_back_edges ();
      cold_bb_count = sanitize_hot_paths (true, cold_bb_count,
                                          &bbs_in_hot_partition);
      if (cold_bb_count)
        sanitize_hot_paths (false, cold_bb_count, &bbs_in_hot_partition);
    }

  /* The format of .gcc_except_table does not allow landing pads to
     be in a different partition as the throw.  Fix this by either
     moving or duplicating the landing pads.  */
  if (cfun->eh->lp_array)
    {
      unsigned i;
      eh_landing_pad lp;

      FOR_EACH_VEC_ELT (*cfun->eh->lp_array, i, lp)
        {
          bool all_same, all_diff;

          if (lp == NULL
              || lp->landing_pad == NULL_RTX
              || !LABEL_P (lp->landing_pad))
            continue;

          all_same = all_diff = true;
          bb = BLOCK_FOR_INSN (lp->landing_pad);
          FOR_EACH_EDGE (e, ei, bb->preds)
            {
              gcc_assert (e->flags & EDGE_EH);
              if (BB_PARTITION (bb) == BB_PARTITION (e->src))
                all_diff = false;
              else
                all_same = false;
            }

          if (all_same)
            ;
          else if (all_diff)
            {
              int which = BB_PARTITION (bb);
              which ^= BB_HOT_PARTITION | BB_COLD_PARTITION;
              BB_SET_PARTITION (bb, which);
            }
          else
            fix_up_crossing_landing_pad (lp, bb);
        }
    }

  /* Mark every edge that crosses between sections.  */

  FOR_EACH_BB_FN (bb, cfun)
    FOR_EACH_EDGE (e, ei, bb->succs)
      {
        unsigned int flags = e->flags;

        /* We should never have EDGE_CROSSING set yet.  */
        gcc_checking_assert ((flags & EDGE_CROSSING) == 0);

        if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
            && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
            && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
          {
            crossing_edges.safe_push (e);
            flags |= EDGE_CROSSING;
          }

        /* Now that we've split eh edges as appropriate, allow landing pads
           to be merged with the post-landing pads.  */
        flags &= ~EDGE_PRESERVE;

        e->flags = flags;
      }

  return crossing_edges;
}
/* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru.  */

static void
set_edge_can_fallthru_flag (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          e->flags &= ~EDGE_CAN_FALLTHRU;

          /* The FALLTHRU edge is also CAN_FALLTHRU edge.  */
          if (e->flags & EDGE_FALLTHRU)
            e->flags |= EDGE_CAN_FALLTHRU;
        }

      /* If the BB ends with an invertible condjump all (2) edges are
         CAN_FALLTHRU edges.  */
      if (EDGE_COUNT (bb->succs) != 2)
        continue;
      if (!any_condjump_p (BB_END (bb)))
        continue;

      rtx_jump_insn *bb_end_jump = as_a <rtx_jump_insn *> (BB_END (bb));
      if (!invert_jump (bb_end_jump, JUMP_LABEL (bb_end_jump), 0))
        continue;
      invert_jump (bb_end_jump, JUMP_LABEL (bb_end_jump), 0);
      EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU;
      EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU;
    }
}
/* If any destination of a crossing edge does not have a label, add label;
   Convert any easy fall-through crossing edges to unconditional jumps.  */

static void
add_labels_and_missing_jumps (vec<edge> crossing_edges)
{
  size_t i;
  edge e;

  FOR_EACH_VEC_ELT (crossing_edges, i, e)
    {
      basic_block src = e->src;
      basic_block dest = e->dest;
      rtx label;
      rtx_insn *new_jump;

      if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
        continue;

      /* Make sure dest has a label.  */
      label = block_label (dest);

      /* Nothing to do for non-fallthru edges.  */
      if (src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        continue;
      if ((e->flags & EDGE_FALLTHRU) == 0)
        continue;

      /* If the block does not end with a control flow insn, then we
         can trivially add a jump to the end to fixup the crossing.
         Otherwise the jump will have to go in a new bb, which will
         be handled by fix_up_fall_thru_edges function.  */
      if (control_flow_insn_p (BB_END (src)))
        continue;

      /* Make sure there's only one successor.  */
      gcc_assert (single_succ_p (src));

      new_jump = emit_jump_insn_after (gen_jump (label), BB_END (src));
      BB_END (src) = new_jump;
      JUMP_LABEL (new_jump) = label;
      LABEL_NUSES (label) += 1;

      emit_barrier_after_bb (src);

      /* Mark edge as non-fallthru.  */
      e->flags &= ~EDGE_FALLTHRU;
    }
}
/* Find any bb's where the fall-through edge is a crossing edge (note that
   these bb's must also contain a conditional jump or end with a call
   instruction; we've already dealt with fall-through edges for blocks
   that didn't have a conditional jump or didn't end with call instruction
   in the call to add_labels_and_missing_jumps).  Convert the fall-through
   edge to non-crossing edge by inserting a new bb to fall-through into.
   The new bb will contain an unconditional jump (crossing edge) to the
   original fall through destination.  */

static void
fix_up_fall_thru_edges (void)
{
  basic_block cur_bb;
  basic_block new_bb;
  edge succ1;
  edge succ2;
  edge fall_thru;
  edge cond_jump = NULL;
  bool cond_jump_crosses;
  int invert_worked;
  rtx_insn *old_jump;
  rtx fall_thru_label;

  FOR_EACH_BB_FN (cur_bb, cfun)
    {
      fall_thru = NULL;
      if (EDGE_COUNT (cur_bb->succs) > 0)
        succ1 = EDGE_SUCC (cur_bb, 0);
      else
        succ1 = NULL;

      if (EDGE_COUNT (cur_bb->succs) > 1)
        succ2 = EDGE_SUCC (cur_bb, 1);
      else
        succ2 = NULL;

      /* Find the fall-through edge.  */

      if (succ1
          && (succ1->flags & EDGE_FALLTHRU))
        {
          fall_thru = succ1;
          cond_jump = succ2;
        }
      else if (succ2
               && (succ2->flags & EDGE_FALLTHRU))
        {
          fall_thru = succ2;
          cond_jump = succ1;
        }
      else if (succ1
               && (block_ends_with_call_p (cur_bb)
                   || can_throw_internal (BB_END (cur_bb))))
        {
          edge e;
          edge_iterator ei;

          FOR_EACH_EDGE (e, ei, cur_bb->succs)
            if (e->flags & EDGE_FALLTHRU)
              {
                fall_thru = e;
                break;
              }
        }

      if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
        {
          /* Check to see if the fall-thru edge is a crossing edge.  */

          if (fall_thru->flags & EDGE_CROSSING)
            {
              /* The fall_thru edge crosses; now check the cond jump edge, if
                 it exists.  */

              cond_jump_crosses = true;
              invert_worked = 0;
              old_jump = BB_END (cur_bb);

              /* Find the jump instruction, if there is one.  */

              if (cond_jump)
                {
                  if (!(cond_jump->flags & EDGE_CROSSING))
                    cond_jump_crosses = false;

                  /* We know the fall-thru edge crosses; if the cond
                     jump edge does NOT cross, and its destination is the
                     next block in the bb order, invert the jump
                     (i.e. fix it so the fall through does not cross and
                     the cond jump does).  */

                  if (!cond_jump_crosses)
                    {
                      /* Find label in fall_thru block.  We've already added
                         any missing labels, so there must be one.  */

                      fall_thru_label = block_label (fall_thru->dest);

                      if (old_jump && fall_thru_label)
                        {
                          rtx_jump_insn *old_jump_insn =
                            dyn_cast <rtx_jump_insn *> (old_jump);
                          if (old_jump_insn)
                            invert_worked = invert_jump (old_jump_insn,
                                                         fall_thru_label, 0);
                        }

                      if (invert_worked)
                        {
                          fall_thru->flags &= ~EDGE_FALLTHRU;
                          cond_jump->flags |= EDGE_FALLTHRU;
                          update_br_prob_note (cur_bb);
                          std::swap (fall_thru, cond_jump);
                          cond_jump->flags |= EDGE_CROSSING;
                          fall_thru->flags &= ~EDGE_CROSSING;
                        }
                    }
                }

              if (cond_jump_crosses || !invert_worked)
                {
                  /* This is the case where both edges out of the basic
                     block are crossing edges.  Here we will fix up the
                     fall through edge.  The jump edge will be taken care
                     of later.  The EDGE_CROSSING flag of fall_thru edge
                     is unset before the call to force_nonfallthru
                     function because if a new basic-block is created
                     this edge remains in the current section boundary
                     while the edge between new_bb and the fall_thru->dest
                     becomes EDGE_CROSSING.  */

                  fall_thru->flags &= ~EDGE_CROSSING;
                  new_bb = force_nonfallthru (fall_thru);

                  if (new_bb)
                    {
                      new_bb->aux = cur_bb->aux;
                      cur_bb->aux = new_bb;

                      /* This is done by force_nonfallthru_and_redirect.  */
                      gcc_assert (BB_PARTITION (new_bb)
                                  == BB_PARTITION (cur_bb));

                      single_succ_edge (new_bb)->flags |= EDGE_CROSSING;
                    }
                  else
                    {
                      /* If a new basic-block was not created; restore
                         the EDGE_CROSSING flag.  */
                      fall_thru->flags |= EDGE_CROSSING;
                    }

                  /* Add barrier after new jump */
                  emit_barrier_after_bb (new_bb ? new_bb : cur_bb);
                }
            }
        }
    }
}
/* This function checks the destination block of a "crossing jump" to
   see if it has any crossing predecessors that begin with a code label
   and end with an unconditional jump.  If so, it returns that predecessor
   block.  (This is to avoid creating lots of new basic blocks that all
   contain unconditional jumps to the same destination).  */

static basic_block
find_jump_block (basic_block jump_dest)
{
  basic_block source_bb = NULL;
  edge e;
  rtx_insn *insn;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, jump_dest->preds)
    if (e->flags & EDGE_CROSSING)
      {
        basic_block src = e->src;

        /* Check each predecessor to see if it has a label, and contains
           only one executable instruction, which is an unconditional jump.
           If so, we can use it.  */

        if (LABEL_P (BB_HEAD (src)))
          for (insn = BB_HEAD (src);
               !INSN_P (insn) && insn != NEXT_INSN (BB_END (src));
               insn = NEXT_INSN (insn))
            {
              if (INSN_P (insn)
                  && insn == BB_END (src)
                  && JUMP_P (insn)
                  && !any_condjump_p (insn))
                {
                  source_bb = src;
                  break;
                }
            }

        if (source_bb)
          break;
      }

  return source_bb;
}
/* Find all BB's with conditional jumps that are crossing edges;
   insert a new bb and make the conditional jump branch to the new
   bb instead (make the new bb same color so conditional branch won't
   be a 'crossing' edge).  Insert an unconditional jump from the
   new bb to the original destination of the conditional jump.  */

static void
fix_crossing_conditional_branches (void)
{
  basic_block cur_bb;
  basic_block new_bb;
  basic_block dest;
  edge succ1;
  edge succ2;
  edge crossing_edge;
  edge new_edge;
  rtx set_src;
  rtx old_label = NULL_RTX;
  rtx_code_label *new_label;

  FOR_EACH_BB_FN (cur_bb, cfun)
    {
      crossing_edge = NULL;
      if (EDGE_COUNT (cur_bb->succs) > 0)
        succ1 = EDGE_SUCC (cur_bb, 0);
      else
        succ1 = NULL;

      if (EDGE_COUNT (cur_bb->succs) > 1)
        succ2 = EDGE_SUCC (cur_bb, 1);
      else
        succ2 = NULL;

      /* We already took care of fall-through edges, so only one successor
         can be a crossing edge.  */

      if (succ1 && (succ1->flags & EDGE_CROSSING))
        crossing_edge = succ1;
      else if (succ2 && (succ2->flags & EDGE_CROSSING))
        crossing_edge = succ2;

      if (crossing_edge)
        {
          rtx_insn *old_jump = BB_END (cur_bb);

          /* Check to make sure the jump instruction is a
             conditional jump.  */

          set_src = NULL_RTX;

          if (any_condjump_p (old_jump))
            {
              if (GET_CODE (PATTERN (old_jump)) == SET)
                set_src = SET_SRC (PATTERN (old_jump));
              else if (GET_CODE (PATTERN (old_jump)) == PARALLEL)
                {
                  set_src = XVECEXP (PATTERN (old_jump), 0,0);
                  if (GET_CODE (set_src) == SET)
                    set_src = SET_SRC (set_src);
                  else
                    set_src = NULL_RTX;
                }
            }

          if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE))
            {
              rtx_jump_insn *old_jump_insn =
                as_a <rtx_jump_insn *> (old_jump);

              if (GET_CODE (XEXP (set_src, 1)) == PC)
                old_label = XEXP (set_src, 2);
              else if (GET_CODE (XEXP (set_src, 2)) == PC)
                old_label = XEXP (set_src, 1);

              /* Check to see if new bb for jumping to that dest has
                 already been created; if so, use it; if not, create
                 a new one.  */

              new_bb = find_jump_block (crossing_edge->dest);

              if (new_bb)
                new_label = block_label (new_bb);
              else
                {
                  basic_block last_bb;
                  rtx_code_label *old_jump_target;
                  rtx_jump_insn *new_jump;

                  /* Create new basic block to be dest for
                     conditional jump.  */

                  /* Put appropriate instructions in new bb.  */

                  new_label = gen_label_rtx ();
                  emit_label (new_label);

                  gcc_assert (GET_CODE (old_label) == LABEL_REF);
                  old_jump_target = old_jump_insn->jump_target ();
                  new_jump = as_a <rtx_jump_insn *>
                    (emit_jump_insn (gen_jump (old_jump_target)));
                  new_jump->set_jump_target (old_jump_target);

                  last_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
                  new_bb = create_basic_block (new_label, new_jump, last_bb);
                  new_bb->aux = last_bb->aux;
                  last_bb->aux = new_bb;

                  emit_barrier_after_bb (new_bb);

                  /* Make sure new bb is in same partition as source
                     of conditional branch.  */
                  BB_COPY_PARTITION (new_bb, cur_bb);
                }

              /* Make old jump branch to new bb.  */

              redirect_jump (old_jump_insn, new_label, 0);

              /* Remove crossing_edge as predecessor of 'dest'.  */

              dest = crossing_edge->dest;

              redirect_edge_succ (crossing_edge, new_bb);

              /* Make a new edge from new_bb to old dest; new edge
                 will be a successor for new_bb and a predecessor
                 for 'dest'.  */

              if (EDGE_COUNT (new_bb->succs) == 0)
                new_edge = make_edge (new_bb, dest, 0);
              else
                new_edge = EDGE_SUCC (new_bb, 0);

              crossing_edge->flags &= ~EDGE_CROSSING;
              new_edge->flags |= EDGE_CROSSING;
            }
        }
    }
}
/* Find any unconditional branches that cross between hot and cold
   sections.  Convert them into indirect jumps instead.  */

static void
fix_crossing_unconditional_branches (void)
{
  basic_block cur_bb;
  rtx_insn *last_insn;
  rtx label;
  rtx label_addr;
  rtx_insn *indirect_jump_sequence;
  rtx_insn *jump_insn = NULL;
  rtx new_reg;
  rtx_insn *cur_insn;
  edge succ;

  FOR_EACH_BB_FN (cur_bb, cfun)
    {
      last_insn = BB_END (cur_bb);

      if (EDGE_COUNT (cur_bb->succs) < 1)
        continue;

      succ = EDGE_SUCC (cur_bb, 0);

      /* Check to see if bb ends in a crossing (unconditional) jump.  At
         this point, no crossing jumps should be conditional.  */

      if (JUMP_P (last_insn)
          && (succ->flags & EDGE_CROSSING))
        {
          gcc_assert (!any_condjump_p (last_insn));

          /* Make sure the jump is not already an indirect or table jump.  */

          if (!computed_jump_p (last_insn)
              && !tablejump_p (last_insn, NULL, NULL))
            {
              /* We have found a "crossing" unconditional branch.  Now
                 we must convert it to an indirect jump.  First create
                 reference of label, as target for jump.  */

              label = JUMP_LABEL (last_insn);
              label_addr = gen_rtx_LABEL_REF (Pmode, label);
              LABEL_NUSES (label) += 1;

              /* Get a register to use for the indirect jump.  */

              new_reg = gen_reg_rtx (Pmode);

              /* Generate the indirect jump sequence.  */

              start_sequence ();
              emit_move_insn (new_reg, label_addr);
              emit_indirect_jump (new_reg);
              indirect_jump_sequence = get_insns ();
              end_sequence ();

              /* Make sure every instruction in the new jump sequence has
                 its basic block set to be cur_bb.  */

              for (cur_insn = indirect_jump_sequence; cur_insn;
                   cur_insn = NEXT_INSN (cur_insn))
                {
                  if (!BARRIER_P (cur_insn))
                    BLOCK_FOR_INSN (cur_insn) = cur_bb;
                  if (JUMP_P (cur_insn))
                    jump_insn = cur_insn;
                }

              /* Insert the new (indirect) jump sequence immediately before
                 the unconditional jump, then delete the unconditional jump.  */

              emit_insn_before (indirect_jump_sequence, last_insn);
              delete_insn (last_insn);

              JUMP_LABEL (jump_insn) = label;
              LABEL_NUSES (label)++;

              /* Make BB_END for cur_bb be the jump instruction (NOT the
                 barrier instruction at the end of the sequence...).  */

              BB_END (cur_bb) = jump_insn;
            }
        }
    }
}
/* Update CROSSING_JUMP_P flags on all jump insns.  */

static void
update_crossing_jump_flags (void)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_EACH_EDGE (e, ei, bb->succs)
      if (e->flags & EDGE_CROSSING)
        {
          if (JUMP_P (BB_END (bb))
              /* Some flags were added during fix_up_fall_thru_edges, via
                 force_nonfallthru_and_redirect.  */
              && !CROSSING_JUMP_P (BB_END (bb)))
            CROSSING_JUMP_P (BB_END (bb)) = 1;
          break;
        }
}
/* Reorder basic blocks.  The main entry point to this file.  */

static void
reorder_basic_blocks (void)
{
  int n_traces;
  int i;
  struct trace *traces;

  gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);

  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1)
    return;

  set_edge_can_fallthru_flag ();
  mark_dfs_back_edges ();

  /* We are estimating the length of an uncond jump insn only once, since
     the code for getting the insn length always returns the minimal
     length now.  */
  if (uncond_jump_length == 0)
    uncond_jump_length = get_uncond_jump_length ();

  /* We need to know some information for each basic block.  */
  array_size = GET_ARRAY_SIZE (last_basic_block_for_fn (cfun));
  bbd = XNEWVEC (bbro_basic_block_data, array_size);
  for (i = 0; i < array_size; i++)
    {
      bbd[i].start_of_trace = -1;
      bbd[i].end_of_trace = -1;
      bbd[i].in_trace = -1;
    }

  traces = XNEWVEC (struct trace, n_basic_blocks_for_fn (cfun));
  n_traces = 0;
  find_traces (&n_traces, traces);
  connect_traces (n_traces, traces);
  free (traces);
  free (bbd);

  relink_block_chain (/*stay_in_cfglayout_mode=*/true);

  if (dump_file)
    {
      if (dump_flags & TDF_DETAILS)
	dump_reg_info (dump_file);
      dump_flow_info (dump_file, dump_flags);
    }

  /* Signal that rtl_verify_flow_info_1 can now verify that there
     is at most one switch between hot/cold sections.  */
  crtl->bb_reorder_complete = true;
}
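/* An illustrative note on the bookkeeping above: BBD is indexed by
   basic block index, so (for example) a block BB that begins the I-th
   trace satisfies

	bbd[bb->index].start_of_trace == i

   after find_traces; connect_traces then consults the start_of_trace
   and end_of_trace entries to stitch traces together before the
   arrays are freed.  */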
/* Determine which partition the first basic block in the function
   belongs to, then find the first basic block in the current function
   that belongs to a different section, and insert a
   NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
   instruction stream.  When writing out the assembly code,
   encountering this note will make the compiler switch between the
   hot and cold text sections.  */

static void
insert_section_boundary_note (void)
{
  basic_block bb;
  bool switched_sections = false;
  int current_partition = 0;

  if (!crtl->has_bb_partition)
    return;

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (!current_partition)
	current_partition = BB_PARTITION (bb);
      if (BB_PARTITION (bb) != current_partition)
	{
	  gcc_assert (!switched_sections);
	  switched_sections = true;
	  emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS, BB_HEAD (bb));
	  current_partition = BB_PARTITION (bb);
	}
    }
}
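/* For illustration: given a block layout HOT HOT COLD COLD, the loop
   above emits a single NOTE_INSN_SWITCH_TEXT_SECTIONS note before the
   first COLD block; the gcc_assert enforces the single hot/cold switch
   that rtl_verify_flow_info_1 later checks for.  */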
namespace {

const pass_data pass_data_reorder_blocks =
{
  RTL_PASS, /* type */
  "bbro", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_REORDER_BLOCKS, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_reorder_blocks : public rtl_opt_pass
{
public:
  pass_reorder_blocks (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_reorder_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      if (targetm.cannot_modify_jumps_p ())
	return false;
      return (optimize > 0
	      && (flag_reorder_blocks || flag_reorder_blocks_and_partition));
    }

  virtual unsigned int execute (function *);

}; // class pass_reorder_blocks

unsigned int
pass_reorder_blocks::execute (function *fun)
{
  basic_block bb;

  /* Last attempt to optimize CFG, as scheduling, peepholing and insn
     splitting possibly introduced more crossjumping opportunities.  */
  cfg_layout_initialize (CLEANUP_EXPENSIVE);

  reorder_basic_blocks ();
  cleanup_cfg (CLEANUP_EXPENSIVE);

  FOR_EACH_BB_FN (bb, fun)
    if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
      bb->aux = bb->next_bb;
  cfg_layout_finalize ();

  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_reorder_blocks (gcc::context *ctxt)
{
  return new pass_reorder_blocks (ctxt);
}
/* Duplicate the blocks containing computed gotos.  This basically unfactors
   computed gotos that were factored early on in the compilation process to
   speed up edge based data flow.  We used not to unfactor them again, which
   can seriously pessimize code with many computed jumps in the source code,
   such as interpreters.  See e.g. PR15242.  */
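/* An illustrative sketch in source terms (hypothetical interpreter
   code, not part of GCC): the factored form shares one computed goto,

	do_add:   ...; goto dispatch;
	do_sub:   ...; goto dispatch;
	dispatch: goto *ops[*pc++];

   and unfactoring duplicates the dispatch block into each handler,

	do_add:   ...; goto *ops[*pc++];
	do_sub:   ...; goto *ops[*pc++];

   giving each handler its own indirect branch, which branch predictors
   typically handle much better than a single shared one.  */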
namespace {

const pass_data pass_data_duplicate_computed_gotos =
{
  RTL_PASS, /* type */
  "compgotos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_REORDER_BLOCKS, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_duplicate_computed_gotos : public rtl_opt_pass
{
public:
  pass_duplicate_computed_gotos (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_duplicate_computed_gotos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_duplicate_computed_gotos
bool
pass_duplicate_computed_gotos::gate (function *fun)
{
  if (targetm.cannot_modify_jumps_p ())
    return false;
  return (optimize > 0
	  && flag_expensive_optimizations
	  && ! optimize_function_for_size_p (fun));
}
unsigned int
pass_duplicate_computed_gotos::execute (function *fun)
{
  basic_block bb, new_bb;
  bitmap candidates;
  int max_size;
  bool changed = false;

  if (n_basic_blocks_for_fn (fun) <= NUM_FIXED_BLOCKS + 1)
    return 0;

  clear_bb_flags ();
  cfg_layout_initialize (0);

  /* We are estimating the length of uncond jump insn only once
     since the code for getting the insn length always returns
     the minimal length now.  */
  if (uncond_jump_length == 0)
    uncond_jump_length = get_uncond_jump_length ();

  max_size
    = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
  candidates = BITMAP_ALLOC (NULL);

  /* Look for blocks that end in a computed jump, and see if such blocks
     are suitable for unfactoring.  If a block is a candidate for
     unfactoring, mark it in the candidates bitmap.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      rtx_insn *insn;
      edge e;
      edge_iterator ei;
      int size, all_flags;

      /* Build the reorder chain for the original order of blocks.  */
      if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
	bb->aux = bb->next_bb;

      /* Obviously the block has to end in a computed jump.  */
      if (!computed_jump_p (BB_END (bb)))
	continue;

      /* Only consider blocks that can be duplicated.  */
      if (CROSSING_JUMP_P (BB_END (bb))
	  || !can_duplicate_block_p (bb))
	continue;

      /* Make sure that the block is small enough.  */
      size = 0;
      FOR_BB_INSNS (bb, insn)
	if (INSN_P (insn))
	  {
	    size += get_attr_min_length (insn);
	    if (size > max_size)
	      break;
	  }
      if (size > max_size)
	continue;

      /* Final check: there must not be any incoming abnormal edges.  */
      all_flags = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
	all_flags |= e->flags;
      if (all_flags & EDGE_COMPLEX)
	continue;

      bitmap_set_bit (candidates, bb->index);
    }

  /* Nothing to do if there is no computed jump here.  */
  if (bitmap_empty_p (candidates))
    goto done;

  /* Duplicate computed gotos.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      if (bb->flags & BB_VISITED)
	continue;

      bb->flags |= BB_VISITED;

      /* BB must have one outgoing edge.  That edge must not lead to
	 the exit block or the next block.
	 The destination must have more than one predecessor.  */
      if (!single_succ_p (bb)
	  || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (fun)
	  || single_succ (bb) == bb->next_bb
	  || single_pred_p (single_succ (bb)))
	continue;

      /* The successor block has to be a duplication candidate.  */
      if (!bitmap_bit_p (candidates, single_succ (bb)->index))
	continue;

      /* Don't duplicate a partition crossing edge, which requires
	 difficult fixup.  */
      if (JUMP_P (BB_END (bb)) && CROSSING_JUMP_P (BB_END (bb)))
	continue;

      new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
      new_bb->aux = bb->aux;
      bb->aux = new_bb;
      new_bb->flags |= BB_VISITED;
      changed = true;
    }

 done:
  if (changed)
    {
      /* Duplicating blocks above will redirect edges and may cause hot
	 blocks previously reached by both hot and cold blocks to become
	 dominated only by cold blocks.  */
      fixup_partitions ();

      /* Merge the duplicated blocks into predecessors, when possible.  */
      cfg_layout_finalize ();
      cleanup_cfg (0);
    }
  else
    cfg_layout_finalize ();

  BITMAP_FREE (candidates);
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_duplicate_computed_gotos (gcc::context *ctxt)
{
  return new pass_duplicate_computed_gotos (ctxt);
}
/* This function is the main 'entrance' for the optimization that
   partitions hot and cold basic blocks into separate sections of the
   .o file (to improve performance and cache locality).  Ideally it
   would be called after all optimizations that rearrange the CFG have
   been called.  However part of this optimization may introduce new
   register usage, so it must be called before register allocation has
   occurred.  This means that this optimization is actually called
   well before the optimization that reorders basic blocks (see
   function above).

   This optimization checks the feedback information to determine
   which basic blocks are hot/cold, and updates flags on the basic
   blocks to indicate which section they belong in.  This information
   is later used for writing out sections in the .o file.  Because hot
   and cold sections can be arbitrarily large (within the bounds of
   memory), far beyond the size of a single function, it is necessary
   to fix up all edges that cross section boundaries, to make sure the
   instructions used can actually span the required distance.  The
   fixes are described below.

   Fall-through edges must be changed into jumps; it is not safe or
   legal to fall through across a section boundary.  Whenever a
   fall-through edge crossing a section boundary is encountered, a new
   basic block is inserted (in the same section as the fall-through
   source), and the fall-through edge is redirected to the new basic
   block.  The new basic block contains an unconditional jump to the
   original fall-through target.  (If the unconditional jump is
   insufficient to cross section boundaries, that is dealt with a
   little later, see below).

   In order to deal with architectures that have short conditional
   branches (which cannot span all of memory) we take any conditional
   jump that attempts to cross a section boundary and add a level of
   indirection: it becomes a conditional jump to a new basic block, in
   the same section.  The new basic block contains an unconditional
   jump to the original target, in the other section.

   For those architectures whose unconditional branch is also
   incapable of reaching all of memory, those unconditional jumps are
   converted into indirect jumps, through a register.

   IMPORTANT NOTE: This optimization causes some messy interactions
   with the cfg cleanup optimizations; those optimizations want to
   merge blocks wherever possible, and to collapse indirect jump
   sequences (change "A jumps to B jumps to C" directly into "A jumps
   to C").  Those optimizations can undo the jump fixes that
   partitioning is required to make (see above); those fixes ensure
   that jumps attempting to cross section boundaries really can cover
   whatever distance the jump requires (on many architectures
   conditional or unconditional jumps are not able to reach all of
   memory).  Therefore tests have to be inserted into each such
   optimization to make sure that it does not undo stuff necessary to
   cross partition boundaries.  This would be much less of a problem
   if we could perform this optimization later in the compilation, but
   unfortunately the fact that we may need to create indirect jumps
   (through registers) requires that this optimization be performed
   before register allocation.

   Hot and cold basic blocks are partitioned and put in separate
   sections of the .o file, to reduce paging and improve cache
   performance (hopefully).  This can result in bits of code from the
   same function being widely separated in the .o file.  However this
   is not visible in the current bb structure.  Therefore we must take
   care to ensure that:  1).  There are no fall_thru edges that cross
   between sections; 2).  For those architectures which have "short"
   conditional branches, all conditional branches that attempt to
   cross between sections are converted to unconditional branches;
   and, 3).  For those architectures which have "short" unconditional
   branches, all unconditional branches that attempt to cross between
   sections are converted to indirect jumps.

   The code for fixing up fall_thru edges that cross between hot and
   cold basic blocks does so by creating new basic blocks containing
   unconditional branches to the appropriate label in the "other"
   section.  The new basic block is then put in the same (hot or cold)
   section as the original conditional branch, and the fall_thru edge
   is modified to fall into the new basic block instead.  By adding
   this level of indirection we end up with only unconditional branches
   crossing between hot and cold sections.

   Conditional branches are dealt with by adding a level of indirection.
   A new basic block is added in the same (hot/cold) section as the
   conditional branch, and the conditional branch is retargeted to the
   new basic block.  The new basic block contains an unconditional branch
   to the original target of the conditional branch (in the other section).

   Unconditional branches are dealt with by converting them into
   indirect jumps.  */
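/* An illustrative sketch of the fall-through fixup described above
   (block and label names are hypothetical):

	before:   hot_bb:   ...              ; falls through into cold_bb
		  cold_bb:  ...              ; other section

	after:    hot_bb:   ...              ; falls through into new_bb
		  new_bb:   jump .L_cold     ; hot section, crossing jump
		  ...
	.L_cold:  cold_bb:  ...

   so no execution path ever falls off the end of one section into the
   other.  */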
namespace {

const pass_data pass_data_partition_blocks =
{
  RTL_PASS, /* type */
  "bbpart", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_REORDER_BLOCKS, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_partition_blocks : public rtl_opt_pass
{
public:
  pass_partition_blocks (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_partition_blocks, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_partition_blocks
bool
pass_partition_blocks::gate (function *fun)
{
  /* The optimization to partition hot/cold basic blocks into separate
     sections of the .o file does not work well with linkonce or with
     user defined section attributes.  Don't call it if either case
     arises.  */
  return (flag_reorder_blocks_and_partition
	  && optimize
	  /* See pass_reorder_blocks::gate.  We should not partition if
	     we are going to omit the reordering.  */
	  && optimize_function_for_speed_p (fun)
	  && !DECL_COMDAT_GROUP (current_function_decl)
	  && !user_defined_section_attribute);
}
unsigned int
pass_partition_blocks::execute (function *fun)
{
  vec<edge> crossing_edges;

  if (n_basic_blocks_for_fn (fun) <= NUM_FIXED_BLOCKS + 1)
    return 0;

  df_set_flags (DF_DEFER_INSN_RESCAN);

  crossing_edges = find_rarely_executed_basic_blocks_and_crossing_edges ();
  if (!crossing_edges.exists ())
    return 0;

  crtl->has_bb_partition = true;

  /* Make sure the source of any crossing edge ends in a jump and the
     destination of any crossing edge has a label.  */
  add_labels_and_missing_jumps (crossing_edges);

  /* Convert all crossing fall_thru edges to non-crossing fall
     thrus to unconditional jumps (that jump to the original fall
     through dest).  */
  fix_up_fall_thru_edges ();

  /* If the architecture does not have conditional branches that can
     span all of memory, convert crossing conditional branches into
     crossing unconditional branches.  */
  if (!HAS_LONG_COND_BRANCH)
    fix_crossing_conditional_branches ();

  /* If the architecture does not have unconditional branches that
     can span all of memory, convert crossing unconditional branches
     into indirect jumps.  Since adding an indirect jump also adds
     a new register usage, update the register usage information as
     well.  */
  if (!HAS_LONG_UNCOND_BRANCH)
    fix_crossing_unconditional_branches ();

  update_crossing_jump_flags ();

  /* Clear bb->aux fields that the above routines were using.  */
  clear_aux_for_blocks ();

  crossing_edges.release ();

  /* ??? FIXME: DF generates the bb info for a block immediately.
     And by immediately, I mean *during* creation of the block.

	#0  df_bb_refs_collect
	#1  in df_bb_refs_record
	#2  in create_basic_block_structure

     Which means that the bb_has_eh_pred test in df_bb_refs_collect
     will *always* fail, because no edges can have been added to the
     block yet.  Which of course means we don't add the right
     artificial refs, which means we fail df_verify (much) later.

     Cleanest solution would seem to make DF_DEFER_INSN_RESCAN imply
     that we also shouldn't grab data from the new blocks those new
     insns are in either.  In this way one can create the block, link
     it up properly, and have everything Just Work later, when deferred
     insns are processed.

     In the meantime, we have no other option but to throw away all
     of the DF data and recompute it all.  */
  if (fun->eh->lp_array)
    {
      df_finish_pass (true);
      df_scan_alloc (NULL);
      df_scan_blocks ();
      /* Not all post-landing pads use all of the EH_RETURN_DATA_REGNO
	 data.  We blindly generated all of them when creating the new
	 landing pad.  Delete those assignments we don't use.  */
      df_set_flags (DF_LR_RUN_DCE);
      df_analyze ();
    }

  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_partition_blocks (gcc::context *ctxt)
{
  return new pass_partition_blocks (ctxt);
}
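/* For reference: the factory functions above are invoked by the pass
   manager; passes.def lists the passes by name, e.g.

	NEXT_PASS (pass_partition_blocks);
	...
	NEXT_PASS (pass_reorder_blocks);

   with pass_partition_blocks running much earlier, before register
   allocation, as explained in the comment preceding it.  */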