[official-gcc.git] / gcc / shrink-wrap.c
/* Shrink-wrapping related optimizations.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file handles shrink-wrapping related optimizations.  */
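/* An illustrative sketch of the idea (hypothetical source code, any target
   with callee-saved registers):

	int
	f (int *p)
	{
	  if (p == NULL)
	    return -1;		/* This path needs no stack frame...  */
	  return g (p) + 1;	/* ...but the path with the call does.  */
	}

   Shrink-wrapping moves the prologue/epilogue off the early-return path,
   so the NULL check runs without setting up a frame, and the prologue is
   emitted only on the path containing the call.  */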
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl-error.h"
#include "tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "stringpool.h"
#include "flags.h"
#include "except.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "regs.h"
#include "insn-config.h"
#include "recog.h"
#include "output.h"
#include "tm_p.h"
#include "langhooks.h"
#include "target.h"
#include "common/common-target.h"
#include "gimple-expr.h"
#include "gimplify.h"
#include "tree-pass.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "basic-block.h"
#include "df.h"
#include "params.h"
#include "bb-reorder.h"
#include "shrink-wrap.h"
#include "regcprop.h"
#include "rtl-iter.h"

#ifdef HAVE_simple_return
/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
                        HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg),
                           REGNO (dreg));
    }
  if (hard_reg_set_intersect_p (hardregs, prologue_used))
    return true;
  AND_COMPL_HARD_REG_SET (hardregs, call_used_reg_set);
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (hardregs, regno)
        && df_regs_ever_live_p (regno))
      return true;

  FOR_EACH_INSN_USE (use, insn)
    {
      rtx reg = DF_REF_REG (use);

      if (!REG_P (reg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (reg),
                           REGNO (reg));
    }
  if (hard_reg_set_intersect_p (hardregs, set_up_by_prologue))
    return true;

  return false;
}
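/* For example (illustrative RTL; register numbers are hypothetical):
   a non-sibling (call_insn ...) always requires a frame, while

	(insn ... (set (reg:SI 3) (plus:SI (reg:SI 3) (const_int 1))))

   requires one if hard register 3 was used by the prologue, or if it is
   call-saved and ever live in the function (the prologue will then have
   saved it).  This merely restates the checks above, not an exhaustive
   list.  */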
/* See whether there is a single live edge from BB, whose destination
   uses [REGNO, END_REGNO).  Return the live edge if its dest bb has
   one or two predecessors.  Otherwise return NULL.  */

static edge
live_edge_for_reg (basic_block bb, int regno, int end_regno)
{
  edge e, live_edge;
  edge_iterator ei;
  bitmap live;
  int i;

  live_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      live = df_get_live_in (e->dest);
      for (i = regno; i < end_regno; i++)
        if (REGNO_REG_SET_P (live, i))
          {
            if (live_edge && live_edge != e)
              return NULL;
            live_edge = e;
          }
    }

  /* We can sometimes encounter dead code.  Don't try to move it
     into the exit block.  */
  if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Reject targets of abnormal edges.  This is needed for correctness
     on ports like Alpha and MIPS, whose pic_offset_table_rtx can die on
     exception edges even though it is generally treated as call-saved
     for the majority of the compilation.  Moving across abnormal edges
     isn't going to be interesting for shrink-wrap usage anyway.  */
  if (live_edge->flags & EDGE_ABNORMAL)
    return NULL;

  /* When live_edge->dest->preds == 2, we can create a new block on
     the edge to make it meet the requirement.  */
  if (EDGE_COUNT (live_edge->dest->preds) > 2)
    return NULL;

  return live_edge;
}
/* Try to move INSN from BB to a successor.  Return true on success.
   USES and DEFS are the set of registers that are used and defined
   after INSN in BB.  SPLIT_P indicates whether a live edge from BB
   is split or not.  */

static bool
move_insn_for_shrink_wrap (basic_block bb, rtx_insn *insn,
                           const HARD_REG_SET uses,
                           const HARD_REG_SET defs,
                           bool *split_p)
{
  rtx set, src, dest;
  bitmap live_out, live_in, bb_uses, bb_defs;
  unsigned int i, dregno, end_dregno;
  unsigned int sregno = FIRST_PSEUDO_REGISTER;
  unsigned int end_sregno = FIRST_PSEUDO_REGISTER;
  basic_block next_block;
  edge live_edge;

  /* Look for a simple register assignment.  We don't use single_set here
     because we can't deal with any CLOBBERs, USEs, or REG_UNUSED secondary
     destinations.  */
  if (!INSN_P (insn))
    return false;
  set = PATTERN (insn);
  if (GET_CODE (set) != SET)
    return false;
  src = SET_SRC (set);
  dest = SET_DEST (set);

  /* For the destination, we want only a register.  Also disallow STACK
     or FRAME related adjustments.  They are likely part of the prologue,
     so keep them in the entry block.  */
  if (!REG_P (dest)
      || dest == stack_pointer_rtx
      || dest == frame_pointer_rtx
      || dest == hard_frame_pointer_rtx)
    return false;

  /* For the source, we want one of:
      (1) A (non-overlapping) register,
      (2) A constant,
      (3) An expression involving no more than one register.

     That last point comes from the code following, which was originally
     written to handle only register move operations, and still only handles
     a single source register when checking for overlaps.  Happily, the
     same checks can be applied to expressions like (plus reg const).  */
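  /* For example, (reg:SI 0), (const_int 42) and (plus:SI (reg:SI 0)
     (const_int 4)) are all acceptable sources here, whereas something
     like (plus:SI (reg:SI 0) (reg:SI 1)) is rejected by the walk below
     because it contains a second inner register.  (Register numbers are
     illustrative only.)  */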
  if (CONSTANT_P (src))
    ;
  else if (!REG_P (src))
    {
      rtx src_inner = NULL_RTX;

      if (can_throw_internal (insn))
        return false;

      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, src, ALL)
        {
          rtx x = *iter;
          switch (GET_RTX_CLASS (GET_CODE (x)))
            {
            case RTX_CONST_OBJ:
            case RTX_COMPARE:
            case RTX_COMM_COMPARE:
            case RTX_BIN_ARITH:
            case RTX_COMM_ARITH:
            case RTX_UNARY:
            case RTX_TERNARY:
              /* Constant or expression.  Continue.  */
              break;

            case RTX_OBJ:
            case RTX_EXTRA:
              switch (GET_CODE (x))
                {
                case UNSPEC:
                case SUBREG:
                case STRICT_LOW_PART:
                case PC:
                  /* Ok.  Continue.  */
                  break;

                case REG:
                  /* Fail if we see a second inner register.  */
                  if (src_inner != NULL)
                    return false;
                  src_inner = x;
                  break;

                default:
                  return false;
                }
              break;

            default:
              return false;
            }
        }

      if (src_inner != NULL)
        src = src_inner;
    }

  /* Make sure that the source register isn't defined later in BB.  */
  if (REG_P (src))
    {
      sregno = REGNO (src);
      end_sregno = END_REGNO (src);
      if (overlaps_hard_reg_set_p (defs, GET_MODE (src), sregno))
        return false;
    }

  /* Make sure that the destination register isn't referenced later in BB.  */
  dregno = REGNO (dest);
  end_dregno = END_REGNO (dest);
  if (overlaps_hard_reg_set_p (uses, GET_MODE (dest), dregno)
      || overlaps_hard_reg_set_p (defs, GET_MODE (dest), dregno))
    return false;

  /* See whether there is a successor block to which we could move INSN.  */
  live_edge = live_edge_for_reg (bb, dregno, end_dregno);
  if (!live_edge)
    return false;

  next_block = live_edge->dest;
  /* Create a new basic block on the edge.  */
  if (EDGE_COUNT (next_block->preds) == 2)
    {
      /* split_edge for a block with only one successor is meaningless.  */
      if (EDGE_COUNT (bb->succs) == 1)
        return false;

      /* If DF_LIVE doesn't exist, i.e. at -O1, just give up.  */
      if (!df_live)
        return false;

      basic_block old_dest = live_edge->dest;
      next_block = split_edge (live_edge);

      /* We create a new basic block.  Call df_grow_bb_info to make sure
         all data structures are allocated.  */
      df_grow_bb_info (df_live);

      bitmap_and (df_get_live_in (next_block), df_get_live_out (bb),
                  df_get_live_in (old_dest));
      df_set_bb_dirty (next_block);

      /* We should not split more than once for a function.  */
      if (*split_p)
        return false;

      *split_p = true;
    }

  /* At this point we are committed to moving INSN, but let's try to
     move it as far as we can.  */
  do
    {
      live_out = df_get_live_out (bb);
      live_in = df_get_live_in (next_block);
      bb = next_block;

      /* Check whether BB uses DEST or clobbers DEST.  We need to add
         INSN to BB if so.  Either way, DEST is no longer live on entry,
         except for any part that overlaps SRC (next loop).  */
      bb_uses = &DF_LR_BB_INFO (bb)->use;
      bb_defs = &DF_LR_BB_INFO (bb)->def;
      if (df_live)
        {
          for (i = dregno; i < end_dregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_uses, i)
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          /* Check whether BB clobbers SRC.  We need to add INSN to BB if so.
             Either way, SRC is now live on entry.  */
          for (i = sregno; i < end_sregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }
      else
        {
          /* DF_LR_BB_INFO (bb)->def does not comprise the DF_REF_PARTIAL and
             DF_REF_CONDITIONAL defs.  So if DF_LIVE doesn't exist, i.e.
             at -O1, just give up searching NEXT_BLOCK.  */
          next_block = NULL;
          for (i = dregno; i < end_dregno; i++)
            {
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          for (i = sregno; i < end_sregno; i++)
            {
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }

      /* If we don't need to add the move to BB, look for a single
         successor block.  */
      if (next_block)
        {
          live_edge = live_edge_for_reg (next_block, dregno, end_dregno);
          if (!live_edge || EDGE_COUNT (live_edge->dest->preds) > 1)
            break;
          next_block = live_edge->dest;
        }
    }
  while (next_block);

  /* For the newly created basic block, there is no dataflow info at all.
     So skip the following dataflow update and check.  */
  if (!(*split_p))
    {
      /* BB now defines DEST.  It only uses the parts of DEST that overlap SRC
         (next loop).  */
      for (i = dregno; i < end_dregno; i++)
        {
          CLEAR_REGNO_REG_SET (bb_uses, i);
          SET_REGNO_REG_SET (bb_defs, i);
        }

      /* BB now uses SRC.  */
      for (i = sregno; i < end_sregno; i++)
        SET_REGNO_REG_SET (bb_uses, i);
    }

  emit_insn_after (PATTERN (insn), bb_note (bb));
  delete_insn (insn);
  return true;
}
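/* A sketch of the transformation performed above (register numbers and
   calling conventions are hypothetical):  if the entry block contains

	(insn ... (set (reg:SI 42) (reg:SI 0)))	;; copy an incoming argument
						;; to a call-saved register

   and hard register 42 is live down only one successor edge, the copy is
   sunk into that successor (or into a freshly split block on the edge),
   leaving the other paths free of any register the prologue would have
   to save.  */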
/* Look for register copies in the first block of the function, and move
   them down into successor blocks if the register is used only on one
   path.  This exposes more opportunities for shrink-wrapping.  These
   kinds of sets often occur when incoming argument registers are moved
   to call-saved registers because their values are live across one or
   more calls during the function.  */

void
prepare_shrink_wrap (basic_block entry_block)
{
  rtx_insn *insn, *curr;
  rtx x;
  HARD_REG_SET uses, defs;
  df_ref def, use;
  bool split_p = false;

  if (JUMP_P (BB_END (entry_block)))
    {
      /* To have more shrink-wrapping opportunities, prepare_shrink_wrap tries
         to sink the copies from parameter to callee-saved register out of the
         entry block.  copyprop_hardreg_forward_bb_without_debug_insn is called
         to release some dependences.  */
      copyprop_hardreg_forward_bb_without_debug_insn (entry_block);
    }

  CLEAR_HARD_REG_SET (uses);
  CLEAR_HARD_REG_SET (defs);
  FOR_BB_INSNS_REVERSE_SAFE (entry_block, insn, curr)
    if (NONDEBUG_INSN_P (insn)
        && !move_insn_for_shrink_wrap (entry_block, insn, uses, defs,
                                       &split_p))
      {
        /* Add all defined registers to DEFS.  */
        FOR_EACH_INSN_DEF (def, insn)
          {
            x = DF_REF_REG (def);
            if (REG_P (x) && HARD_REGISTER_P (x))
              SET_HARD_REG_BIT (defs, REGNO (x));
          }

        /* Add all used registers to USES.  */
        FOR_EACH_INSN_USE (use, insn)
          {
            x = DF_REF_REG (use);
            if (REG_P (x) && HARD_REGISTER_P (x))
              SET_HARD_REG_BIT (uses, REGNO (x));
          }
      }
}
/* Create a copy of BB instructions and insert at BEFORE.  Redirect
   preds of BB to COPY_BB if they don't appear in NEED_PROLOGUE.  */
void
dup_block_and_redirect (basic_block bb, basic_block copy_bb, rtx_insn *before,
                        bitmap_head *need_prologue)
{
  edge_iterator ei;
  edge e;
  rtx_insn *insn = BB_END (bb);

  /* We know BB has a single successor, so there is no need to copy a
     simple jump at the end of BB.  */
  if (simplejump_p (insn))
    insn = PREV_INSN (insn);

  start_sequence ();
  duplicate_insn_chain (BB_HEAD (bb), insn);
  if (dump_file)
    {
      unsigned count = 0;
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        if (active_insn_p (insn))
          ++count;
      fprintf (dump_file, "Duplicating bb %d to bb %d, %u active insns.\n",
               bb->index, copy_bb->index, count);
    }
  insn = get_insns ();
  end_sequence ();
  emit_insn_before (insn, before);

  /* Redirect all the paths that need no prologue into copy_bb.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
    if (!bitmap_bit_p (need_prologue, e->src->index))
      {
        int freq = EDGE_FREQUENCY (e);
        copy_bb->count += e->count;
        copy_bb->frequency += EDGE_FREQUENCY (e);
        e->dest->count -= e->count;
        if (e->dest->count < 0)
          e->dest->count = 0;
        e->dest->frequency -= freq;
        if (e->dest->frequency < 0)
          e->dest->frequency = 0;
        redirect_edge_and_branch_force (e, copy_bb);
        continue;
      }
    else
      ei_next (&ei);
}
/* Try to perform a kind of shrink-wrapping, making sure the
   prologue/epilogue is emitted only around those parts of the
   function that require it.  */

void
try_shrink_wrapping (edge *entry_edge, edge orig_entry_edge,
                     bitmap_head *bb_flags, rtx_insn *prologue_seq)
{
  edge e;
  edge_iterator ei;
  bool nonempty_prologue = false;
  unsigned max_grow_size;
  rtx_insn *seq;

  for (seq = prologue_seq; seq; seq = NEXT_INSN (seq))
    if (!NOTE_P (seq) || NOTE_KIND (seq) != NOTE_INSN_PROLOGUE_END)
      {
        nonempty_prologue = true;
        break;
      }

  if (flag_shrink_wrap && HAVE_simple_return
      && (targetm.profile_before_prologue () || !crtl->profile)
      && nonempty_prologue && !crtl->calls_eh_return)
    {
      HARD_REG_SET prologue_clobbered, prologue_used, live_on_edge;
      struct hard_reg_set_container set_up_by_prologue;
      rtx_insn *p_insn;
      vec<basic_block> vec;
      basic_block bb;
      bitmap_head bb_antic_flags;
      bitmap_head bb_on_list;
      bitmap_head bb_tail;

      if (dump_file)
        fprintf (dump_file, "Attempting shrink-wrapping optimization.\n");

      /* Compute the registers set and used in the prologue.  */
      CLEAR_HARD_REG_SET (prologue_clobbered);
      CLEAR_HARD_REG_SET (prologue_used);
      for (p_insn = prologue_seq; p_insn; p_insn = NEXT_INSN (p_insn))
        {
          HARD_REG_SET this_used;
          if (!NONDEBUG_INSN_P (p_insn))
            continue;

          CLEAR_HARD_REG_SET (this_used);
          note_uses (&PATTERN (p_insn), record_hard_reg_uses,
                     &this_used);
          AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered);
          IOR_HARD_REG_SET (prologue_used, this_used);
          note_stores (PATTERN (p_insn), record_hard_reg_sets,
                       &prologue_clobbered);
        }

      prepare_shrink_wrap ((*entry_edge)->dest);

      bitmap_initialize (&bb_antic_flags, &bitmap_default_obstack);
      bitmap_initialize (&bb_on_list, &bitmap_default_obstack);
      bitmap_initialize (&bb_tail, &bitmap_default_obstack);

      /* Find the set of basic blocks that require a stack frame,
         and blocks that are too big to be duplicated.  */

      vec.create (n_basic_blocks_for_fn (cfun));

      CLEAR_HARD_REG_SET (set_up_by_prologue.set);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                           STACK_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM);
      if (frame_pointer_needed)
        add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                             HARD_FRAME_POINTER_REGNUM);
      if (pic_offset_table_rtx
          && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
        add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                             PIC_OFFSET_TABLE_REGNUM);
      if (crtl->drap_reg)
        add_to_hard_reg_set (&set_up_by_prologue.set,
                             GET_MODE (crtl->drap_reg),
                             REGNO (crtl->drap_reg));
      if (targetm.set_up_by_prologue)
        targetm.set_up_by_prologue (&set_up_by_prologue);
      /* We don't use a different max size depending on
         optimize_bb_for_speed_p because increasing shrink-wrapping
         opportunities by duplicating tail blocks can actually result
         in an overall decrease in code size.  */
      max_grow_size = get_uncond_jump_length ();
      max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);

      FOR_EACH_BB_FN (bb, cfun)
        {
          rtx_insn *insn;
          unsigned size = 0;

          FOR_BB_INSNS (bb, insn)
            if (NONDEBUG_INSN_P (insn))
              {
                if (requires_stack_frame_p (insn, prologue_used,
                                            set_up_by_prologue.set))
                  {
                    if (bb == (*entry_edge)->dest)
                      goto fail_shrinkwrap;
                    bitmap_set_bit (bb_flags, bb->index);
                    vec.quick_push (bb);
                    break;
                  }
                else if (size <= max_grow_size)
                  {
                    size += get_attr_min_length (insn);
                    if (size > max_grow_size)
                      bitmap_set_bit (&bb_on_list, bb->index);
                  }
              }
        }

      /* Blocks that really need a prologue, or are too big for tails.  */
      bitmap_ior_into (&bb_on_list, bb_flags);

      /* For every basic block that needs a prologue, mark all blocks
         reachable from it, so as to ensure they are also seen as
         requiring a prologue.  */
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();

          FOR_EACH_EDGE (e, ei, tmp_bb->succs)
            if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
                && bitmap_set_bit (bb_flags, e->dest->index))
              vec.quick_push (e->dest);
        }

      /* Find the set of basic blocks that need no prologue, have a
         single successor, can be duplicated, meet a max size
         requirement, and go to the exit via like blocks.  */
      vec.quick_push (EXIT_BLOCK_PTR_FOR_FN (cfun));
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();

          FOR_EACH_EDGE (e, ei, tmp_bb->preds)
            if (single_succ_p (e->src)
                && !bitmap_bit_p (&bb_on_list, e->src->index)
                && can_duplicate_block_p (e->src))
              {
                edge pe;
                edge_iterator pei;

                /* If there is a predecessor of e->src which doesn't
                   need a prologue and the edge is complex, we might
                   not be able to redirect the branch to a copy of
                   e->src.  */
                FOR_EACH_EDGE (pe, pei, e->src->preds)
                  if ((pe->flags & EDGE_COMPLEX) != 0
                      && !bitmap_bit_p (bb_flags, pe->src->index))
                    break;
                if (pe == NULL && bitmap_set_bit (&bb_tail, e->src->index))
                  vec.quick_push (e->src);
              }
        }
      /* Now walk backwards from every block that is marked as needing
         a prologue to compute the bb_antic_flags bitmap.  Exclude
         tail blocks; they can be duplicated to be used on paths not
         needing a prologue.  */
      bitmap_clear (&bb_on_list);
      bitmap_and_compl (&bb_antic_flags, bb_flags, &bb_tail);
      FOR_EACH_BB_FN (bb, cfun)
        {
          if (!bitmap_bit_p (&bb_antic_flags, bb->index))
            continue;
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
                && bitmap_set_bit (&bb_on_list, e->src->index))
              vec.quick_push (e->src);
        }
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();
          bool all_set = true;

          bitmap_clear_bit (&bb_on_list, tmp_bb->index);
          FOR_EACH_EDGE (e, ei, tmp_bb->succs)
            if (!bitmap_bit_p (&bb_antic_flags, e->dest->index))
              {
                all_set = false;
                break;
              }

          if (all_set)
            {
              bitmap_set_bit (&bb_antic_flags, tmp_bb->index);
              FOR_EACH_EDGE (e, ei, tmp_bb->preds)
                if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
                    && bitmap_set_bit (&bb_on_list, e->src->index))
                  vec.quick_push (e->src);
            }
        }
      /* Find exactly one edge that leads to a block in ANTIC from
         a block that isn't.  */
      if (!bitmap_bit_p (&bb_antic_flags, (*entry_edge)->dest->index))
        FOR_EACH_BB_FN (bb, cfun)
          {
            if (!bitmap_bit_p (&bb_antic_flags, bb->index))
              continue;
            FOR_EACH_EDGE (e, ei, bb->preds)
              if (!bitmap_bit_p (&bb_antic_flags, e->src->index))
                {
                  if (*entry_edge != orig_entry_edge)
                    {
                      *entry_edge = orig_entry_edge;
                      if (dump_file)
                        fprintf (dump_file, "More than one candidate edge.\n");
                      goto fail_shrinkwrap;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Found candidate edge for "
                             "shrink-wrapping, %d->%d.\n", e->src->index,
                             e->dest->index);
                  *entry_edge = e;
                }
          }
      if (*entry_edge != orig_entry_edge)
        {
          /* Test whether the prologue is known to clobber any register
             (other than FP or SP) that is live on the edge.  */
          CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM);
          if (frame_pointer_needed)
            CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM);
          REG_SET_TO_HARD_REG_SET (live_on_edge,
                                   df_get_live_in ((*entry_edge)->dest));
          if (hard_reg_set_intersect_p (live_on_edge, prologue_clobbered))
            {
              *entry_edge = orig_entry_edge;
              if (dump_file)
                fprintf (dump_file,
                         "Shrink-wrapping aborted due to clobber.\n");
            }
        }
      if (*entry_edge != orig_entry_edge)
        {
          crtl->shrink_wrapped = true;
          if (dump_file)
            fprintf (dump_file, "Performing shrink-wrapping.\n");

          /* Find tail blocks reachable from both blocks needing a
             prologue and blocks not needing a prologue.  */
          if (!bitmap_empty_p (&bb_tail))
            FOR_EACH_BB_FN (bb, cfun)
              {
                bool some_pro, some_no_pro;
                if (!bitmap_bit_p (&bb_tail, bb->index))
                  continue;
                some_pro = some_no_pro = false;
                FOR_EACH_EDGE (e, ei, bb->preds)
                  {
                    if (bitmap_bit_p (bb_flags, e->src->index))
                      some_pro = true;
                    else
                      some_no_pro = true;
                  }
                if (some_pro && some_no_pro)
                  vec.quick_push (bb);
                else
                  bitmap_clear_bit (&bb_tail, bb->index);
              }
          /* Find the head of each tail.  */
          while (!vec.is_empty ())
            {
              basic_block tbb = vec.pop ();

              if (!bitmap_bit_p (&bb_tail, tbb->index))
                continue;

              while (single_succ_p (tbb))
                {
                  tbb = single_succ (tbb);
                  bitmap_clear_bit (&bb_tail, tbb->index);
                }
            }
          /* Now duplicate the tails.  */
          if (!bitmap_empty_p (&bb_tail))
            FOR_EACH_BB_REVERSE_FN (bb, cfun)
              {
                basic_block copy_bb, tbb;
                rtx_insn *insert_point;
                int eflags;

                if (!bitmap_clear_bit (&bb_tail, bb->index))
                  continue;

                /* Create a copy of BB, instructions and all, for
                   use on paths that don't need a prologue.
                   Ideal placement of the copy is on a fall-thru edge
                   or after a block that would jump to the copy.  */
                FOR_EACH_EDGE (e, ei, bb->preds)
                  if (!bitmap_bit_p (bb_flags, e->src->index)
                      && single_succ_p (e->src))
                    break;
                if (e)
                  {
                    /* Make sure we insert after any barriers.  */
                    rtx_insn *end = get_last_bb_insn (e->src);
                    copy_bb = create_basic_block (NEXT_INSN (end),
                                                  NULL_RTX, e->src);
                    BB_COPY_PARTITION (copy_bb, e->src);
                  }
                else
                  {
                    /* Otherwise put the copy at the end of the function.  */
                    copy_bb = create_basic_block (NULL_RTX, NULL_RTX,
                                                  EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
                    BB_COPY_PARTITION (copy_bb, bb);
                  }

                insert_point = emit_note_after (NOTE_INSN_DELETED,
                                                BB_END (copy_bb));
                emit_barrier_after (BB_END (copy_bb));

                tbb = bb;
                while (1)
                  {
                    dup_block_and_redirect (tbb, copy_bb, insert_point,
                                            bb_flags);
                    tbb = single_succ (tbb);
                    if (tbb == EXIT_BLOCK_PTR_FOR_FN (cfun))
                      break;
                    e = split_block (copy_bb, PREV_INSN (insert_point));
                    copy_bb = e->dest;
                  }

                /* Quiet verify_flow_info by (ab)using EDGE_FAKE.
                   We have yet to add a simple_return to the tails,
                   as we'd like to first convert_jumps_to_returns in
                   case the block is no longer used after that.  */
                eflags = EDGE_FAKE;
                if (CALL_P (PREV_INSN (insert_point))
                    && SIBLING_CALL_P (PREV_INSN (insert_point)))
                  eflags = EDGE_SIBCALL | EDGE_ABNORMAL;
                make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
                                       eflags);

                /* verify_flow_info doesn't like a note after a
                   sibling call.  */
                delete_insn (insert_point);
                if (bitmap_empty_p (&bb_tail))
                  break;
              }
        }

    fail_shrinkwrap:
      bitmap_clear (&bb_tail);
      bitmap_clear (&bb_antic_flags);
      bitmap_clear (&bb_on_list);
      vec.release ();
    }
}
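/* An illustrative CFG for the candidate-edge selection above (block
   numbers are hypothetical):

	entry -> bb2 -> bb3 -> exit	(bb3 requires a stack frame)
	          \
	           `-> bb4 -> exit	(bb4 does not)

   Here ANTIC = {bb3}, and bb2->bb3 is the only edge from a non-ANTIC
   block into ANTIC, so it becomes the new entry edge: the prologue is
   emitted on that edge, and the path through bb4 runs without it.  */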
/* If we're allowed to generate a simple return instruction, then by
   definition we don't need a full epilogue.  If the last basic
   block before the exit block does not contain active instructions,
   examine its predecessors and try to emit (conditional) return
   instructions.  */

edge
get_unconverted_simple_return (edge exit_fallthru_edge, bitmap_head bb_flags,
                               vec<edge> *unconverted_simple_returns,
                               rtx_insn **returnjump)
{
  if (optimize)
    {
      unsigned i, last;

      /* convert_jumps_to_returns may add to preds of the exit block
         (but won't remove).  Stop at end of current preds.  */
      last = EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
      for (i = 0; i < last; i++)
        {
          edge e = EDGE_I (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, i);
          if (LABEL_P (BB_HEAD (e->src))
              && !bitmap_bit_p (&bb_flags, e->src->index)
              && !active_insn_between (BB_HEAD (e->src), BB_END (e->src)))
            *unconverted_simple_returns
              = convert_jumps_to_returns (e->src, true,
                                          *unconverted_simple_returns);
        }
    }

  if (exit_fallthru_edge != NULL
      && EDGE_COUNT (exit_fallthru_edge->src->preds) != 0
      && !bitmap_bit_p (&bb_flags, exit_fallthru_edge->src->index))
    {
      basic_block last_bb;

      last_bb = emit_return_for_exit (exit_fallthru_edge, true);
      *returnjump = BB_END (last_bb);
      exit_fallthru_edge = NULL;
    }
  return exit_fallthru_edge;
}
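/* For instance (hypothetical CFG), if the block before the exit block is
   empty and one of its predecessors ends in a conditional jump to it,
   convert_jumps_to_returns may rewrite that jump directly into a
   conditional simple_return.  Any edges it cannot convert are accumulated
   in UNCONVERTED_SIMPLE_RETURNS and handled by convert_to_simple_return
   below.  */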
/* If there were branches to an empty LAST_BB which we tried to
   convert to conditional simple_returns, but couldn't for some
   reason, create a block to hold a simple_return insn and redirect
   those remaining edges.  */

void
convert_to_simple_return (edge entry_edge, edge orig_entry_edge,
                          bitmap_head bb_flags, rtx_insn *returnjump,
                          vec<edge> unconverted_simple_returns)
{
  edge e;
  edge_iterator ei;

  if (!unconverted_simple_returns.is_empty ())
    {
      basic_block simple_return_block_hot = NULL;
      basic_block simple_return_block_cold = NULL;
      edge pending_edge_hot = NULL;
      edge pending_edge_cold = NULL;
      basic_block exit_pred;
      int i;

      gcc_assert (entry_edge != orig_entry_edge);

      /* See if we can reuse the last insn that was emitted for the
         epilogue.  */
      if (returnjump != NULL_RTX
          && JUMP_LABEL (returnjump) == simple_return_rtx)
        {
          e = split_block (BLOCK_FOR_INSN (returnjump), PREV_INSN (returnjump));
          if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
            simple_return_block_hot = e->dest;
          else
            simple_return_block_cold = e->dest;
        }

      /* Also check returns we might need to add to tail blocks.  */
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
        if (EDGE_COUNT (e->src->preds) != 0
            && (e->flags & EDGE_FAKE) != 0
            && !bitmap_bit_p (&bb_flags, e->src->index))
          {
            if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
              pending_edge_hot = e;
            else
              pending_edge_cold = e;
          }

      /* Save a pointer to the exit's predecessor BB for use in
         inserting new BBs at the end of the function.  Do this
         after the call to split_block above which may split
         the original exit pred.  */
      exit_pred = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;

      FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
        {
          basic_block *pdest_bb;
          edge pending;

          if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
            {
              pdest_bb = &simple_return_block_hot;
              pending = pending_edge_hot;
            }
          else
            {
              pdest_bb = &simple_return_block_cold;
              pending = pending_edge_cold;
            }

          if (*pdest_bb == NULL && pending != NULL)
            {
              emit_return_into_block (true, pending->src);
              pending->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
              *pdest_bb = pending->src;
            }
          else if (*pdest_bb == NULL)
            {
              basic_block bb;
              rtx_insn *start;

              bb = create_basic_block (NULL, NULL, exit_pred);
              BB_COPY_PARTITION (bb, e->src);
              start = emit_jump_insn_after (gen_simple_return (),
                                            BB_END (bb));
              JUMP_LABEL (start) = simple_return_rtx;
              emit_barrier_after (start);

              *pdest_bb = bb;
              make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
            }
          redirect_edge_and_branch_force (e, *pdest_bb);
        }
      unconverted_simple_returns.release ();
    }

  if (entry_edge != orig_entry_edge)
    {
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
        if (EDGE_COUNT (e->src->preds) != 0
            && (e->flags & EDGE_FAKE) != 0
            && !bitmap_bit_p (&bb_flags, e->src->index))
          {
            emit_return_into_block (true, e->src);
            e->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
          }
    }
}
#endif