/* Shrink-wrapping related optimizations.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This file handles shrink-wrapping related optimizations.  */
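/* Editorial illustration (not part of the original sources): in a
   function such as

       void f (int x) { if (x) g (); }

   only the path that reaches the call to g () needs a stack frame.
   Shrink-wrapping moves the prologue from the entry block onto the
   edge leading into that path, so the early-return path pays no
   frame setup or teardown cost.  */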
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl-error.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "stringpool.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "langhooks.h"
#include "common/common-target.h"
#include "gimple-expr.h"
#include "tree-pass.h"
#include "dominance.h"
#include "basic-block.h"
#include "bb-reorder.h"
#include "shrink-wrap.h"
/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
                        HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg), REGNO (dreg));
    }
  if (hard_reg_set_intersect_p (hardregs, prologue_used))
    return true;
  AND_COMPL_HARD_REG_SET (hardregs, call_used_reg_set);
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (hardregs, regno)
        && df_regs_ever_live_p (regno))
      return true;

  FOR_EACH_INSN_USE (use, insn)
    {
      rtx reg = DF_REF_REG (use);

      if (!REG_P (reg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (reg), REGNO (reg));
    }
  if (hard_reg_set_intersect_p (hardregs, set_up_by_prologue))
    return true;

  return false;
}
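/* Editorial note: in practice this means a (non-sibling) call always
   needs the frame, as does any insn that clobbers a call-saved
   register which is live somewhere in the function, since the
   prologue would then have to save that register.  */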
/* See whether there is a single live edge out of BB whose dest uses
   [REGNO, END_REGNO).  Return the live edge if its dest bb has
   one or two predecessors.  Otherwise return NULL.  */

static edge
live_edge_for_reg (basic_block bb, int regno, int end_regno)
{
  edge e, live_edge;
  edge_iterator ei;
  bitmap live;
  int i;

  live_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      live = df_get_live_in (e->dest);
      for (i = regno; i < end_regno; i++)
        if (REGNO_REG_SET_P (live, i))
          {
            if (live_edge && live_edge != e)
              return NULL;
            live_edge = e;
          }
    }

  /* We can sometimes encounter dead code.  Don't try to move it
     into the exit block.  */
  if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Reject targets of abnormal edges.  This is needed for correctness
     on ports like Alpha and MIPS, whose pic_offset_table_rtx can die on
     exception edges even though it is generally treated as call-saved
     for the majority of the compilation.  Moving across abnormal edges
     isn't going to be interesting for shrink-wrap usage anyway.  */
  if (live_edge->flags & EDGE_ABNORMAL)
    return NULL;

  /* When live_edge->dest->preds == 2, we can create a new block on
     the edge to make it meet the requirement.  */
  if (EDGE_COUNT (live_edge->dest->preds) > 2)
    return NULL;

  return live_edge;
}
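/* Editorial note: "single live edge" above means that exactly one
   successor of BB has any of [REGNO, END_REGNO) in its live-in set;
   if two successors need the value, the defining insn cannot be
   sunk out of BB.  */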
/* Try to move INSN from BB to a successor.  Return true on success.
   USES and DEFS are the set of registers that are used and defined
   after INSN in BB.  SPLIT_P indicates whether a live edge from BB
   has been split or not.  */

static bool
move_insn_for_shrink_wrap (basic_block bb, rtx_insn *insn,
                           const HARD_REG_SET uses,
                           const HARD_REG_SET defs,
                           bool *split_p)
{
  rtx set, src, dest;
  bitmap live_out, live_in, bb_uses, bb_defs;
  unsigned int i, dregno, end_dregno;
  unsigned int sregno = FIRST_PSEUDO_REGISTER;
  unsigned int end_sregno = FIRST_PSEUDO_REGISTER;
  basic_block next_block;
  edge live_edge;

  /* Look for a simple register assignment.  We don't use single_set here
     because we can't deal with any CLOBBERs, USEs, or REG_UNUSED secondary
     destinations.  */
  if (!INSN_P (insn))
    return false;
  set = PATTERN (insn);
  if (GET_CODE (set) != SET)
    return false;
  src = SET_SRC (set);
  dest = SET_DEST (set);

  /* For the destination, we want only a register.  Also disallow STACK
     or FRAME related adjustments.  They are likely part of the prologue,
     so keep them in the entry block.  */
  if (!REG_P (dest)
      || dest == stack_pointer_rtx
      || dest == frame_pointer_rtx
      || dest == hard_frame_pointer_rtx)
    return false;

  /* For the source, we want one of:
      (1) A (non-overlapping) register
      (2) A constant,
      (3) An expression involving no more than one register.

     That last point comes from the code following, which was originally
     written to handle only register move operations, and still only handles
     a single source register when checking for overlaps.  Happily, the
     same checks can be applied to expressions like (plus reg const).  */

  if (CONSTANT_P (src))
    ;
  else if (!REG_P (src))
    {
      rtx src_inner = NULL_RTX;

      if (can_throw_internal (insn))
        return false;

      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, src, ALL)
        {
          rtx x = *iter;
          switch (GET_RTX_CLASS (GET_CODE (x)))
            {
            case RTX_CONST_OBJ:
            case RTX_COMPARE:
            case RTX_COMM_COMPARE:
            case RTX_BIN_ARITH:
            case RTX_COMM_ARITH:
            case RTX_UNARY:
            case RTX_TERNARY:
              /* Constant or expression.  Continue.  */
              break;

            case RTX_OBJ:
            case RTX_EXTRA:
              switch (GET_CODE (x))
                {
                case UNSPEC:
                case SUBREG:
                case STRICT_LOW_PART:
                case PC:
                  /* Ok.  Continue.  */
                  break;

                case REG:
                  /* Fail if we see a second inner register.  */
                  if (src_inner != NULL)
                    return false;
                  src_inner = x;
                  break;

                default:
                  return false;
                }
              break;

            default:
              return false;
            }
        }

      if (src_inner != NULL)
        src = src_inner;
    }

  /* Make sure that the source register isn't defined later in BB.  */
  if (REG_P (src))
    {
      sregno = REGNO (src);
      end_sregno = END_REGNO (src);
      if (overlaps_hard_reg_set_p (defs, GET_MODE (src), sregno))
        return false;
    }

  /* Make sure that the destination register isn't referenced later in BB.  */
  dregno = REGNO (dest);
  end_dregno = END_REGNO (dest);
  if (overlaps_hard_reg_set_p (uses, GET_MODE (dest), dregno)
      || overlaps_hard_reg_set_p (defs, GET_MODE (dest), dregno))
    return false;

  /* See whether there is a successor block to which we could move INSN.  */
  live_edge = live_edge_for_reg (bb, dregno, end_dregno);
  if (!live_edge)
    return false;

  next_block = live_edge->dest;
  /* Create a new basic block on the edge.  */
  if (EDGE_COUNT (next_block->preds) == 2)
    {
      /* split_edge for a block with only one successor is meaningless.  */
      if (EDGE_COUNT (bb->succs) == 1)
        return false;

      /* If DF_LIVE doesn't exist, i.e. at -O1, just give up.  */
      if (!df_live)
        return false;

      basic_block old_dest = live_edge->dest;
      next_block = split_edge (live_edge);

      /* We create a new basic block.  Call df_grow_bb_info to make sure
         all data structures are allocated.  */
      df_grow_bb_info (df_live);

      bitmap_and (df_get_live_in (next_block), df_get_live_out (bb),
                  df_get_live_in (old_dest));
      df_set_bb_dirty (next_block);

      /* We should not split more than once for a function.  */
      if (*split_p)
        return false;

      *split_p = true;
    }

  /* At this point we are committed to moving INSN, but let's try to
     move it as far as we can.  */
  do
    {
      live_out = df_get_live_out (bb);
      live_in = df_get_live_in (next_block);
      bb = next_block;

      /* Check whether BB uses DEST or clobbers DEST.  We need to add
         INSN to BB if so.  Either way, DEST is no longer live on entry,
         except for any part that overlaps SRC (next loop).  */
      bb_uses = &DF_LR_BB_INFO (bb)->use;
      bb_defs = &DF_LR_BB_INFO (bb)->def;
      if (df_live)
        {
          for (i = dregno; i < end_dregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_uses, i)
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          /* Check whether BB clobbers SRC.  We need to add INSN to BB if so.
             Either way, SRC is now live on entry.  */
          for (i = sregno; i < end_sregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }
      else
        {
          /* DF_LR_BB_INFO (bb)->def does not comprise the DF_REF_PARTIAL and
             DF_REF_CONDITIONAL defs.  So if DF_LIVE doesn't exist, i.e.
             at -O1, just give up searching NEXT_BLOCK.  */
          next_block = NULL;
          for (i = dregno; i < end_dregno; i++)
            {
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          for (i = sregno; i < end_sregno; i++)
            {
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }

      /* If we don't need to add the move to BB, look for a single
         successor block.  */
      if (next_block)
        {
          live_edge = live_edge_for_reg (next_block, dregno, end_dregno);
          if (!live_edge || EDGE_COUNT (live_edge->dest->preds) > 1)
            break;
          next_block = live_edge->dest;
        }
    }
  while (next_block);

  /* For the newly created basic block, there is no dataflow info at all.
     So skip the following dataflow update and check.  */
  if (!*split_p)
    {
      /* BB now defines DEST.  It only uses the parts of DEST that overlap SRC
         (next loop).  */
      for (i = dregno; i < end_dregno; i++)
        {
          CLEAR_REGNO_REG_SET (bb_uses, i);
          SET_REGNO_REG_SET (bb_defs, i);
        }

      /* BB now uses SRC.  */
      for (i = sregno; i < end_sregno; i++)
        SET_REGNO_REG_SET (bb_uses, i);
    }

  emit_insn_after (PATTERN (insn), bb_note (bb));
  delete_insn (insn);
  return true;
}
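/* Editorial sketch of the transformation above (hypothetical RTL, not
   taken from the sources): an entry-block copy such as

       (set (reg:DI 3 bx) (reg:DI 5 di))

   whose destination is live along only one successor edge is
   re-emitted at the head of that successor (or of a block split from
   the edge) and removed from the entry block, so the prologue no
   longer has to dominate this copy.  */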
/* Look for register copies in the first block of the function, and move
   them down into successor blocks if the register is used only on one
   path.  This exposes more opportunities for shrink-wrapping.  These
   kinds of sets often occur when incoming argument registers are moved
   to call-saved registers because their values are live across one or
   more calls during the function.  */

static void
prepare_shrink_wrap (basic_block entry_block)
{
  rtx_insn *insn, *curr;
  rtx x;
  HARD_REG_SET uses, defs;
  df_ref def, use;
  bool split_p = false;

  if (JUMP_P (BB_END (entry_block)))
    {
      /* To have more shrink-wrapping opportunities, prepare_shrink_wrap tries
         to sink copies from parameter registers to callee-saved registers out
         of the entry block.  copyprop_hardreg_forward_bb_without_debug_insn
         is called to release some dependences.  */
      copyprop_hardreg_forward_bb_without_debug_insn (entry_block);
    }

  CLEAR_HARD_REG_SET (uses);
  CLEAR_HARD_REG_SET (defs);
  FOR_BB_INSNS_REVERSE_SAFE (entry_block, insn, curr)
    if (NONDEBUG_INSN_P (insn)
        && !move_insn_for_shrink_wrap (entry_block, insn, uses, defs,
                                       &split_p))
      {
        /* Add all defined registers to DEFS.  */
        FOR_EACH_INSN_DEF (def, insn)
          {
            x = DF_REF_REG (def);
            if (REG_P (x) && HARD_REGISTER_P (x))
              SET_HARD_REG_BIT (defs, REGNO (x));
          }

        /* Add all used registers to USES.  */
        FOR_EACH_INSN_USE (use, insn)
          {
            x = DF_REF_REG (use);
            if (REG_P (x) && HARD_REGISTER_P (x))
              SET_HARD_REG_BIT (uses, REGNO (x));
          }
      }
}
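/* Editorial note: because the walk above is in reverse order, USES and
   DEFS describe exactly the registers referenced after the insn
   currently being considered, which is what move_insn_for_shrink_wrap
   needs in order to prove that sinking the insn is safe.  */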
/* Create a copy of BB instructions and insert at BEFORE.  Redirect
   preds of BB to COPY_BB if they don't appear in NEED_PROLOGUE.  */

static void
dup_block_and_redirect (basic_block bb, basic_block copy_bb, rtx_insn *before,
                        bitmap_head *need_prologue)
{
  edge_iterator ei;
  edge e;
  rtx_insn *insn = BB_END (bb);

  /* We know BB has a single successor, so there is no need to copy a
     simple jump at the end of BB.  */
  if (simplejump_p (insn))
    insn = PREV_INSN (insn);

  start_sequence ();
  duplicate_insn_chain (BB_HEAD (bb), insn);
  if (dump_file)
    {
      unsigned count = 0;
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        if (active_insn_p (insn))
          count++;
      fprintf (dump_file, "Duplicating bb %d to bb %d, %u active insns.\n",
               bb->index, copy_bb->index, count);
    }
  insn = get_insns ();
  end_sequence ();
  emit_insn_before (insn, before);

  /* Redirect all the paths that need no prologue into copy_bb.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
    if (!bitmap_bit_p (need_prologue, e->src->index))
      {
        int freq = EDGE_FREQUENCY (e);
        copy_bb->count += e->count;
        copy_bb->frequency += EDGE_FREQUENCY (e);
        e->dest->count -= e->count;
        if (e->dest->count < 0)
          e->dest->count = 0;
        e->dest->frequency -= freq;
        if (e->dest->frequency < 0)
          e->dest->frequency = 0;
        redirect_edge_and_branch_force (e, copy_bb);
      }
    else
      ei_next (&ei);
}
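/* Editorial note: only edges whose source is not marked in
   NEED_PROLOGUE are redirected to the copy; their execution counts and
   frequencies are transferred along with them so the profile of the
   original block stays consistent.  */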
/* Try to perform a kind of shrink-wrapping, making sure the
   prologue/epilogue is emitted only around those parts of the
   function that require it.  */

void
try_shrink_wrapping (edge *entry_edge, edge orig_entry_edge,
                     bitmap_head *bb_flags, rtx_insn *prologue_seq)
{
  edge e;
  edge_iterator ei;
  bool nonempty_prologue = false;
  unsigned max_grow_size;
  rtx_insn *seq;

  for (seq = prologue_seq; seq; seq = NEXT_INSN (seq))
    if (!NOTE_P (seq) || NOTE_KIND (seq) != NOTE_INSN_PROLOGUE_END)
      {
        nonempty_prologue = true;
        break;
      }

  if (SHRINK_WRAPPING_ENABLED
      && (targetm.profile_before_prologue () || !crtl->profile)
      && nonempty_prologue && !crtl->calls_eh_return)
    {
      HARD_REG_SET prologue_clobbered, prologue_used, live_on_edge;
      struct hard_reg_set_container set_up_by_prologue;
      rtx_insn *p_insn;
      vec<basic_block> vec;
      basic_block bb;
      bitmap_head bb_antic_flags;
      bitmap_head bb_on_list;
      bitmap_head bb_tail;

      if (dump_file)
        fprintf (dump_file, "Attempting shrink-wrapping optimization.\n");

      /* Compute the registers set and used in the prologue.  */
      CLEAR_HARD_REG_SET (prologue_clobbered);
      CLEAR_HARD_REG_SET (prologue_used);
      for (p_insn = prologue_seq; p_insn; p_insn = NEXT_INSN (p_insn))
        {
          HARD_REG_SET this_used;
          if (!NONDEBUG_INSN_P (p_insn))
            continue;

          CLEAR_HARD_REG_SET (this_used);
          note_uses (&PATTERN (p_insn), record_hard_reg_uses,
                     &this_used);
          AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered);
          IOR_HARD_REG_SET (prologue_used, this_used);
          note_stores (PATTERN (p_insn), record_hard_reg_sets,
                       &prologue_clobbered);
        }

      prepare_shrink_wrap ((*entry_edge)->dest);

      bitmap_initialize (&bb_antic_flags, &bitmap_default_obstack);
      bitmap_initialize (&bb_on_list, &bitmap_default_obstack);
      bitmap_initialize (&bb_tail, &bitmap_default_obstack);

      /* Find the set of basic blocks that require a stack frame,
         and blocks that are too big to be duplicated.  */

      vec.create (n_basic_blocks_for_fn (cfun));

      CLEAR_HARD_REG_SET (set_up_by_prologue.set);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                           STACK_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM);
      if (frame_pointer_needed)
        add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                             HARD_FRAME_POINTER_REGNUM);
      if (pic_offset_table_rtx
          && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
        add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                             PIC_OFFSET_TABLE_REGNUM);
      if (crtl->drap_reg)
        add_to_hard_reg_set (&set_up_by_prologue.set,
                             GET_MODE (crtl->drap_reg),
                             REGNO (crtl->drap_reg));
      if (targetm.set_up_by_prologue)
        targetm.set_up_by_prologue (&set_up_by_prologue);

      /* We don't use a different max size depending on
         optimize_bb_for_speed_p because increasing shrink-wrapping
         opportunities by duplicating tail blocks can actually result
         in an overall decrease in code size.  */
      max_grow_size = get_uncond_jump_length ();
      max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);

      FOR_EACH_BB_FN (bb, cfun)
        {
          rtx_insn *insn;
          unsigned size = 0;

          FOR_BB_INSNS (bb, insn)
            if (NONDEBUG_INSN_P (insn))
              {
                if (requires_stack_frame_p (insn, prologue_used,
                                            set_up_by_prologue.set))
                  {
                    if (bb == (*entry_edge)->dest)
                      goto fail_shrinkwrap;
                    bitmap_set_bit (bb_flags, bb->index);
                    vec.quick_push (bb);
                    break;
                  }
                else if (size <= max_grow_size)
                  {
                    size += get_attr_min_length (insn);
                    if (size > max_grow_size)
                      bitmap_set_bit (&bb_on_list, bb->index);
                  }
              }
        }

      /* Blocks that really need a prologue, or are too big for tails.  */
      bitmap_ior_into (&bb_on_list, bb_flags);

      /* For every basic block that needs a prologue, mark all blocks
         reachable from it, so as to ensure they are also seen as
         requiring a prologue.  */
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();

          FOR_EACH_EDGE (e, ei, tmp_bb->succs)
            if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
                && bitmap_set_bit (bb_flags, e->dest->index))
              vec.quick_push (e->dest);
        }

      /* Find the set of basic blocks that need no prologue, have a
         single successor, can be duplicated, meet a max size
         requirement, and go to the exit via like blocks.  */
      vec.quick_push (EXIT_BLOCK_PTR_FOR_FN (cfun));
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();

          FOR_EACH_EDGE (e, ei, tmp_bb->preds)
            if (single_succ_p (e->src)
                && !bitmap_bit_p (&bb_on_list, e->src->index)
                && can_duplicate_block_p (e->src))
              {
                edge pe;
                edge_iterator pei;

                /* If there is a predecessor of e->src which doesn't
                   need a prologue and the edge is complex,
                   we might not be able to redirect the branch
                   to a copy of e->src.  */
                FOR_EACH_EDGE (pe, pei, e->src->preds)
                  if ((pe->flags & EDGE_COMPLEX) != 0
                      && !bitmap_bit_p (bb_flags, pe->src->index))
                    break;
                if (pe == NULL && bitmap_set_bit (&bb_tail, e->src->index))
                  vec.quick_push (e->src);
              }
        }

      /* Now walk backwards from every block that is marked as needing
         a prologue to compute the bb_antic_flags bitmap.  Exclude
         tail blocks; they can be duplicated to be used on paths not
         needing a prologue.  */
      bitmap_clear (&bb_on_list);
      bitmap_and_compl (&bb_antic_flags, bb_flags, &bb_tail);
      FOR_EACH_BB_FN (bb, cfun)
        {
          if (!bitmap_bit_p (&bb_antic_flags, bb->index))
            continue;
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
                && bitmap_set_bit (&bb_on_list, e->src->index))
              vec.quick_push (e->src);
        }
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();
          bool all_set = true;

          bitmap_clear_bit (&bb_on_list, tmp_bb->index);
          FOR_EACH_EDGE (e, ei, tmp_bb->succs)
            if (!bitmap_bit_p (&bb_antic_flags, e->dest->index))
              {
                all_set = false;
                break;
              }

          if (all_set)
            {
              bitmap_set_bit (&bb_antic_flags, tmp_bb->index);
              FOR_EACH_EDGE (e, ei, tmp_bb->preds)
                if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
                    && bitmap_set_bit (&bb_on_list, e->src->index))
                  vec.quick_push (e->src);
            }
        }
      /* Find exactly one edge that leads to a block in ANTIC from
         a block that isn't.  */
      if (!bitmap_bit_p (&bb_antic_flags, (*entry_edge)->dest->index))
        FOR_EACH_BB_FN (bb, cfun)
          {
            if (!bitmap_bit_p (&bb_antic_flags, bb->index))
              continue;
            FOR_EACH_EDGE (e, ei, bb->preds)
              if (!bitmap_bit_p (&bb_antic_flags, e->src->index))
                {
                  if (*entry_edge != orig_entry_edge)
                    {
                      *entry_edge = orig_entry_edge;
                      if (dump_file)
                        fprintf (dump_file, "More than one candidate edge.\n");
                      goto fail_shrinkwrap;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Found candidate edge for "
                             "shrink-wrapping, %d->%d.\n", e->src->index,
                             e->dest->index);
                  *entry_edge = e;
                }
          }

      if (*entry_edge != orig_entry_edge)
        {
          /* Test whether the prologue is known to clobber any register
             (other than FP or SP) which is live on the edge.  */
          CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM);
          if (frame_pointer_needed)
            CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM);
          REG_SET_TO_HARD_REG_SET (live_on_edge,
                                   df_get_live_in ((*entry_edge)->dest));
          if (hard_reg_set_intersect_p (live_on_edge, prologue_clobbered))
            {
              *entry_edge = orig_entry_edge;
              if (dump_file)
                fprintf (dump_file,
                         "Shrink-wrapping aborted due to clobber.\n");
            }
        }
      if (*entry_edge != orig_entry_edge)
        {
          crtl->shrink_wrapped = true;
          if (dump_file)
            fprintf (dump_file, "Performing shrink-wrapping.\n");

          /* Find tail blocks reachable from both blocks needing a
             prologue and blocks not needing a prologue.  */
          if (!bitmap_empty_p (&bb_tail))
            FOR_EACH_BB_FN (bb, cfun)
              {
                bool some_pro, some_no_pro;
                if (!bitmap_bit_p (&bb_tail, bb->index))
                  continue;
                some_pro = some_no_pro = false;
                FOR_EACH_EDGE (e, ei, bb->preds)
                  {
                    if (bitmap_bit_p (bb_flags, e->src->index))
                      some_pro = true;
                    else
                      some_no_pro = true;
                  }
                if (some_pro && some_no_pro)
                  vec.quick_push (bb);
                else
                  bitmap_clear_bit (&bb_tail, bb->index);
              }
          /* Find the head of each tail.  */
          while (!vec.is_empty ())
            {
              basic_block tbb = vec.pop ();

              if (!bitmap_bit_p (&bb_tail, tbb->index))
                continue;

              while (single_succ_p (tbb))
                {
                  tbb = single_succ (tbb);
                  bitmap_clear_bit (&bb_tail, tbb->index);
                }
            }
          /* Now duplicate the tails.  */
          if (!bitmap_empty_p (&bb_tail))
            FOR_EACH_BB_REVERSE_FN (bb, cfun)
              {
                basic_block copy_bb, tbb;
                int eflags;

                if (!bitmap_clear_bit (&bb_tail, bb->index))
                  continue;

                /* Create a copy of BB, instructions and all, for
                   use on paths that don't need a prologue.
                   Ideal placement of the copy is on a fall-thru edge
                   or after a block that would jump to the copy.  */
                FOR_EACH_EDGE (e, ei, bb->preds)
                  if (!bitmap_bit_p (bb_flags, e->src->index)
                      && single_succ_p (e->src))
                    break;
                if (e)
                  {
                    /* Make sure we insert after any barriers.  */
                    rtx_insn *end = get_last_bb_insn (e->src);
                    copy_bb = create_basic_block (NEXT_INSN (end),
                                                  NULL_RTX, e->src);
                    BB_COPY_PARTITION (copy_bb, e->src);
                  }
                else
                  {
                    /* Otherwise put the copy at the end of the function.  */
                    copy_bb = create_basic_block (NULL_RTX, NULL_RTX,
                                                  EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
                    BB_COPY_PARTITION (copy_bb, bb);
                  }

                rtx_note *insert_point = emit_note_after (NOTE_INSN_DELETED,
                                                          BB_END (copy_bb));
                emit_barrier_after (BB_END (copy_bb));

                tbb = bb;
                while (1)
                  {
                    dup_block_and_redirect (tbb, copy_bb, insert_point,
                                            bb_flags);
                    tbb = single_succ (tbb);
                    if (tbb == EXIT_BLOCK_PTR_FOR_FN (cfun))
                      break;
                    e = split_block (copy_bb, PREV_INSN (insert_point));
                    copy_bb = e->dest;
                  }

                /* Quiet verify_flow_info by (ab)using EDGE_FAKE.
                   We have yet to add a simple_return to the tails,
                   as we'd like to first convert_jumps_to_returns in
                   case the block is no longer used after that.  */
                eflags = EDGE_FAKE;
                if (CALL_P (PREV_INSN (insert_point))
                    && SIBLING_CALL_P (PREV_INSN (insert_point)))
                  eflags = EDGE_SIBCALL | EDGE_ABNORMAL;
                make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
                                       eflags);

                /* verify_flow_info doesn't like a note after a
                   sibling call.  */
                delete_insn (insert_point);
                if (bitmap_empty_p (&bb_tail))
                  break;
              }
        }

    fail_shrinkwrap:
      bitmap_clear (&bb_tail);
      bitmap_clear (&bb_antic_flags);
      bitmap_clear (&bb_on_list);
      vec.release ();
    }
}
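/* Editorial summary of try_shrink_wrapping (not from the original
   sources): (1) compute the registers the prologue sets up and uses;
   (2) flag every block that needs a stack frame, plus everything
   reachable from such a block; (3) compute the blocks where the
   prologue is anticipated on every outgoing path (bb_antic_flags);
   (4) accept the transformation only if there is exactly one edge
   from a non-anticipated block into an anticipated one, which becomes
   the new prologue location; (5) duplicate shared tail blocks so that
   paths which skip the prologue never run prologue-requiring code.  */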
/* If we're allowed to generate a simple return instruction, then by
   definition we don't need a full epilogue.  If the last basic
   block before the exit block does not contain active instructions,
   examine its predecessors and try to emit (conditional) return
   instructions.  */

edge
get_unconverted_simple_return (edge exit_fallthru_edge, bitmap_head bb_flags,
                               vec<edge> *unconverted_simple_returns,
                               rtx_insn **returnjump)
{
  if (optimize)
    {
      unsigned i, last;

      /* convert_jumps_to_returns may add to preds of the exit block
         (but won't remove).  Stop at end of current preds.  */
      last = EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
      for (i = 0; i < last; i++)
        {
          edge e = EDGE_I (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, i);
          if (LABEL_P (BB_HEAD (e->src))
              && !bitmap_bit_p (&bb_flags, e->src->index)
              && !active_insn_between (BB_HEAD (e->src), BB_END (e->src)))
            *unconverted_simple_returns
              = convert_jumps_to_returns (e->src, true,
                                          *unconverted_simple_returns);
        }
    }

  if (exit_fallthru_edge != NULL
      && EDGE_COUNT (exit_fallthru_edge->src->preds) != 0
      && !bitmap_bit_p (&bb_flags, exit_fallthru_edge->src->index))
    {
      basic_block last_bb;

      last_bb = emit_return_for_exit (exit_fallthru_edge, true);
      *returnjump = BB_END (last_bb);
      exit_fallthru_edge = NULL;
    }
  return exit_fallthru_edge;
}
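/* Editorial note: a simple_return differs from a plain return in that
   it does not run the epilogue, so it may only be emitted in blocks
   that the analysis above proved do not need the prologue.  */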
/* If there were branches to an empty LAST_BB which we tried to
   convert to conditional simple_returns, but couldn't for some
   reason, create a block to hold a simple_return insn and redirect
   those remaining edges.  */

void
convert_to_simple_return (edge entry_edge, edge orig_entry_edge,
                          bitmap_head bb_flags, rtx_insn *returnjump,
                          vec<edge> unconverted_simple_returns)
{
  edge e;
  edge_iterator ei;

  if (!unconverted_simple_returns.is_empty ())
    {
      basic_block simple_return_block_hot = NULL;
      basic_block simple_return_block_cold = NULL;
      edge pending_edge_hot = NULL;
      edge pending_edge_cold = NULL;
      basic_block exit_pred;
      int i;

      gcc_assert (entry_edge != orig_entry_edge);

      /* See if we can reuse the last insn that was emitted for the
         simple_return.  */
      if (returnjump != NULL_RTX
          && JUMP_LABEL (returnjump) == simple_return_rtx)
        {
          e = split_block (BLOCK_FOR_INSN (returnjump), PREV_INSN (returnjump));
          if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
            simple_return_block_hot = e->dest;
          else
            simple_return_block_cold = e->dest;
        }

      /* Also check returns we might need to add to tail blocks.  */
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
        if (EDGE_COUNT (e->src->preds) != 0
            && (e->flags & EDGE_FAKE) != 0
            && !bitmap_bit_p (&bb_flags, e->src->index))
          {
            if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
              pending_edge_hot = e;
            else
              pending_edge_cold = e;
          }

      /* Save a pointer to the exit's predecessor BB for use in
         inserting new BBs at the end of the function.  Do this
         after the call to split_block above which may split
         the original exit pred.  */
      exit_pred = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;

      FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
        {
          basic_block *pdest_bb;
          edge pending;

          if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
            {
              pdest_bb = &simple_return_block_hot;
              pending = pending_edge_hot;
            }
          else
            {
              pdest_bb = &simple_return_block_cold;
              pending = pending_edge_cold;
            }

          if (*pdest_bb == NULL && pending != NULL)
            {
              emit_return_into_block (true, pending->src);
              pending->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
              *pdest_bb = pending->src;
            }
          else if (*pdest_bb == NULL)
            {
              basic_block bb;

              bb = create_basic_block (NULL, NULL, exit_pred);
              BB_COPY_PARTITION (bb, e->src);
              rtx_insn *ret = targetm.gen_simple_return ();
              rtx_jump_insn *start = emit_jump_insn_after (ret, BB_END (bb));
              JUMP_LABEL (start) = simple_return_rtx;
              emit_barrier_after (start);

              *pdest_bb = bb;
              make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
            }
          redirect_edge_and_branch_force (e, *pdest_bb);
        }
      unconverted_simple_returns.release ();
    }

  if (entry_edge != orig_entry_edge)
    {
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
        if (EDGE_COUNT (e->src->preds) != 0
            && (e->flags & EDGE_FAKE) != 0
            && !bitmap_bit_p (&bb_flags, e->src->index))
          {
            emit_return_into_block (true, e->src);
            e->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);