gcc/shrink-wrap.c
/* Shrink-wrapping related optimizations.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file handles shrink-wrapping related optimizations.  */
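
/* Shrink-wrapping moves the prologue and epilogue away from the entry
   and exit blocks, so that paths through the function which need no
   stack frame never execute them.  Roughly, the helpers below find the
   blocks that genuinely require the prologue, duplicate "tail" blocks
   shared between frame-using and frame-free paths, pick a single edge
   on which the prologue can be emitted, and turn the remaining exits
   into simple_return instructions.  */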
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl-error.h"
#include "tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "stringpool.h"
#include "flags.h"
#include "except.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "libfuncs.h"
#include "regs.h"
#include "insn-config.h"
#include "recog.h"
#include "output.h"
#include "tm_p.h"
#include "langhooks.h"
#include "target.h"
#include "common/common-target.h"
#include "gimple-expr.h"
#include "gimplify.h"
#include "tree-pass.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "basic-block.h"
#include "df.h"
#include "params.h"
#include "bb-reorder.h"
#include "shrink-wrap.h"
#include "regcprop.h"
#include "rtl-iter.h"

#ifdef HAVE_simple_return
/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
                        HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg), REGNO (dreg));
    }
  if (hard_reg_set_intersect_p (hardregs, prologue_used))
    return true;
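
  /* Any remaining defs of call-saved registers matter too: if INSN
     writes a call-saved register that is live anywhere in the
     function, the prologue has to save and restore it, which requires
     a frame.  */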
  AND_COMPL_HARD_REG_SET (hardregs, call_used_reg_set);
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (hardregs, regno)
        && df_regs_ever_live_p (regno))
      return true;

  FOR_EACH_INSN_USE (use, insn)
    {
      rtx reg = DF_REF_REG (use);

      if (!REG_P (reg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (reg), REGNO (reg));
    }
  if (hard_reg_set_intersect_p (hardregs, set_up_by_prologue))
    return true;

  return false;
}
/* See whether BB has a single live successor edge whose destination
   uses any of [REGNO, END_REGNO).  Return that edge if its destination
   block has one or two predecessors.  Otherwise return NULL.  */

static edge
live_edge_for_reg (basic_block bb, int regno, int end_regno)
{
  edge e, live_edge;
  edge_iterator ei;
  bitmap live;
  int i;

  live_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      live = df_get_live_in (e->dest);
      for (i = regno; i < end_regno; i++)
        if (REGNO_REG_SET_P (live, i))
          {
            if (live_edge && live_edge != e)
              return NULL;
            live_edge = e;
          }
    }

  /* We can sometimes encounter dead code.  Don't try to move it
     into the exit block.  */
  if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Reject targets of abnormal edges.  This is needed for correctness
     on ports like Alpha and MIPS, whose pic_offset_table_rtx can die on
     exception edges even though it is generally treated as call-saved
     for the majority of the compilation.  Moving across abnormal edges
     isn't going to be interesting for shrink-wrap usage anyway.  */
  if (live_edge->flags & EDGE_ABNORMAL)
    return NULL;

  /* When live_edge->dest->preds == 2, we can create a new block on
     the edge to make it meet the requirement.  */
  if (EDGE_COUNT (live_edge->dest->preds) > 2)
    return NULL;

  return live_edge;
}
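
/* A destination with two predecessors is acceptable above because the
   caller, move_insn_for_shrink_wrap, can split the live edge and give
   the moved instruction a block of its own (see below).  */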
/* Try to move INSN from BB to a successor.  Return true on success.
   USES and DEFS are the set of registers that are used and defined
   after INSN in BB.  SPLIT_P indicates whether a live edge from BB
   is split or not.  */

static bool
move_insn_for_shrink_wrap (basic_block bb, rtx_insn *insn,
                           const HARD_REG_SET uses,
                           const HARD_REG_SET defs,
                           bool *split_p)
{
  rtx set, src, dest;
  bitmap live_out, live_in, bb_uses, bb_defs;
  unsigned int i, dregno, end_dregno;
  unsigned int sregno = FIRST_PSEUDO_REGISTER;
  unsigned int end_sregno = FIRST_PSEUDO_REGISTER;
  basic_block next_block;
  edge live_edge;

  /* Look for a simple register assignment.  We don't use single_set here
     because we can't deal with any CLOBBERs, USEs, or REG_UNUSED secondary
     destinations.  */
  if (!INSN_P (insn))
    return false;
  set = PATTERN (insn);
  if (GET_CODE (set) != SET)
    return false;
  src = SET_SRC (set);
  dest = SET_DEST (set);

  /* For the destination, we want only a register.  Also disallow STACK
     or FRAME related adjustments.  They are likely part of the prologue,
     so keep them in the entry block.  */
  if (!REG_P (dest)
      || dest == stack_pointer_rtx
      || dest == frame_pointer_rtx
      || dest == hard_frame_pointer_rtx)
    return false;

  /* For the source, we want one of:
      (1) A (non-overlapping) register
      (2) A constant,
      (3) An expression involving no more than one register.

     That last point comes from the code following, which was originally
     written to handle only register move operations, and still only handles
     a single source register when checking for overlaps.  Happily, the
     same checks can be applied to expressions like (plus reg const).  */

  if (CONSTANT_P (src))
    ;
  else if (!REG_P (src))
    {
      rtx src_inner = NULL_RTX;

      if (can_throw_internal (insn))
        return false;

      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, src, ALL)
        {
          rtx x = *iter;
          switch (GET_RTX_CLASS (GET_CODE (x)))
            {
            case RTX_CONST_OBJ:
            case RTX_COMPARE:
            case RTX_COMM_COMPARE:
            case RTX_BIN_ARITH:
            case RTX_COMM_ARITH:
            case RTX_UNARY:
            case RTX_TERNARY:
              /* Constant or expression.  Continue.  */
              break;

            case RTX_OBJ:
            case RTX_EXTRA:
              switch (GET_CODE (x))
                {
                case UNSPEC:
                case SUBREG:
                case STRICT_LOW_PART:
                case PC:
                  /* Ok.  Continue.  */
                  break;

                case REG:
                  /* Fail if we see a second inner register.  */
                  if (src_inner != NULL)
                    return false;
                  src_inner = x;
                  break;

                default:
                  return false;
                }
              break;

            default:
              return false;
            }
        }

      if (src_inner != NULL)
        src = src_inner;
    }

  /* Make sure that the source register isn't defined later in BB.  */
  if (REG_P (src))
    {
      sregno = REGNO (src);
      end_sregno = END_REGNO (src);
      if (overlaps_hard_reg_set_p (defs, GET_MODE (src), sregno))
        return false;
    }

  /* Make sure that the destination register isn't referenced later in BB.  */
  dregno = REGNO (dest);
  end_dregno = END_REGNO (dest);
  if (overlaps_hard_reg_set_p (uses, GET_MODE (dest), dregno)
      || overlaps_hard_reg_set_p (defs, GET_MODE (dest), dregno))
    return false;

  /* See whether there is a successor block to which we could move INSN.  */
  live_edge = live_edge_for_reg (bb, dregno, end_dregno);
  if (!live_edge)
    return false;

  next_block = live_edge->dest;
  /* Create a new basic block on the edge.  */
  if (EDGE_COUNT (next_block->preds) == 2)
    {
      /* split_edge for a block with only one successor is meaningless.  */
      if (EDGE_COUNT (bb->succs) == 1)
        return false;

      /* If DF_LIVE doesn't exist, i.e. at -O1, just give up.  */
      if (!df_live)
        return false;

      basic_block old_dest = live_edge->dest;
      next_block = split_edge (live_edge);

      /* We create a new basic block.  Call df_grow_bb_info to make sure
         all data structures are allocated.  */
      df_grow_bb_info (df_live);

      bitmap_and (df_get_live_in (next_block), df_get_live_out (bb),
                  df_get_live_in (old_dest));
      df_set_bb_dirty (next_block);

      /* We should not split more than once for a function.  */
      if (*split_p)
        return false;

      *split_p = true;
    }
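
  /* Splitting at most once keeps the bookkeeping below simple: once
     *SPLIT_P is set, the loops that follow force NEXT_BLOCK to NULL so
     INSN lands in the freshly created block, and the final DF_LR
     update is skipped because the new block has no dataflow info
     yet.  */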
  /* At this point we are committed to moving INSN, but let's try to
     move it as far as we can.  */
  do
    {
      live_out = df_get_live_out (bb);
      live_in = df_get_live_in (next_block);
      bb = next_block;

      /* Check whether BB uses DEST or clobbers DEST.  We need to add
         INSN to BB if so.  Either way, DEST is no longer live on entry,
         except for any part that overlaps SRC (next loop).  */
      bb_uses = &DF_LR_BB_INFO (bb)->use;
      bb_defs = &DF_LR_BB_INFO (bb)->def;
      if (df_live)
        {
          for (i = dregno; i < end_dregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_uses, i)
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          /* Check whether BB clobbers SRC.  We need to add INSN to BB if so.
             Either way, SRC is now live on entry.  */
          for (i = sregno; i < end_sregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }
      else
        {
          /* DF_LR_BB_INFO (bb)->def does not comprise the DF_REF_PARTIAL and
             DF_REF_CONDITIONAL defs.  So if DF_LIVE doesn't exist, i.e.
             at -O1, just give up searching NEXT_BLOCK.  */
          next_block = NULL;
          for (i = dregno; i < end_dregno; i++)
            {
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          for (i = sregno; i < end_sregno; i++)
            {
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }

      /* If we don't need to add the move to BB, look for a single
         successor block.  */
      if (next_block)
        {
          live_edge = live_edge_for_reg (next_block, dregno, end_dregno);
          if (!live_edge || EDGE_COUNT (live_edge->dest->preds) > 1)
            break;
          next_block = live_edge->dest;
        }
    }
  while (next_block);

  /* For the newly created basic block, there is no dataflow info at all,
     so skip the following dataflow update and check.  */
  if (!(*split_p))
    {
      /* BB now defines DEST.  It only uses the parts of DEST that overlap SRC
         (next loop).  */
      for (i = dregno; i < end_dregno; i++)
        {
          CLEAR_REGNO_REG_SET (bb_uses, i);
          SET_REGNO_REG_SET (bb_defs, i);
        }

      /* BB now uses SRC.  */
      for (i = sregno; i < end_sregno; i++)
        SET_REGNO_REG_SET (bb_uses, i);
    }

  emit_insn_after (PATTERN (insn), bb_note (bb));
  delete_insn (insn);
  return true;
}
/* Look for register copies in the first block of the function, and move
   them down into successor blocks if the register is used only on one
   path.  This exposes more opportunities for shrink-wrapping.  These
   kinds of sets often occur when incoming argument registers are moved
   to call-saved registers because their values are live across one or
   more calls during the function.  */
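
/* For example, a function that saves an incoming argument into a
   call-saved register before a conditional early exit typically starts
   with something like

       (set (reg: call-saved) (reg: argument))

   in the entry block.  Sinking that copy into the branch that actually
   needs the saved value leaves the entry block free of
   prologue-requiring instructions, so the early-exit path can be
   shrink-wrapped.  */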
void
prepare_shrink_wrap (basic_block entry_block)
{
  rtx_insn *insn, *curr;
  rtx x;
  HARD_REG_SET uses, defs;
  df_ref def, use;
  bool split_p = false;

  if (JUMP_P (BB_END (entry_block)))
    {
      /* To have more shrink-wrapping opportunities, prepare_shrink_wrap tries
         to sink copies from parameter registers to callee-saved registers out
         of the entry block.  copyprop_hardreg_forward_bb_without_debug_insn
         is called first to release some dependences.  */
      copyprop_hardreg_forward_bb_without_debug_insn (entry_block);
    }

  CLEAR_HARD_REG_SET (uses);
  CLEAR_HARD_REG_SET (defs);
  FOR_BB_INSNS_REVERSE_SAFE (entry_block, insn, curr)
    if (NONDEBUG_INSN_P (insn)
        && !move_insn_for_shrink_wrap (entry_block, insn, uses, defs,
                                       &split_p))
      {
        /* Add all defined registers to DEFS.  */
        FOR_EACH_INSN_DEF (def, insn)
          {
            x = DF_REF_REG (def);
            if (REG_P (x) && HARD_REGISTER_P (x))
              SET_HARD_REG_BIT (defs, REGNO (x));
          }

        /* Add all used registers to USES.  */
        FOR_EACH_INSN_USE (use, insn)
          {
            x = DF_REF_REG (use);
            if (REG_P (x) && HARD_REGISTER_P (x))
              SET_HARD_REG_BIT (uses, REGNO (x));
          }
      }
}
/* Create a copy of BB instructions and insert at BEFORE.  Redirect
   preds of BB to COPY_BB if they don't appear in NEED_PROLOGUE.  */
void
dup_block_and_redirect (basic_block bb, basic_block copy_bb, rtx_insn *before,
                        bitmap_head *need_prologue)
{
  edge_iterator ei;
  edge e;
  rtx_insn *insn = BB_END (bb);

  /* We know BB has a single successor, so there is no need to copy a
     simple jump at the end of BB.  */
  if (simplejump_p (insn))
    insn = PREV_INSN (insn);

  start_sequence ();
  duplicate_insn_chain (BB_HEAD (bb), insn);
  if (dump_file)
    {
      unsigned count = 0;
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        if (active_insn_p (insn))
          ++count;
      fprintf (dump_file, "Duplicating bb %d to bb %d, %u active insns.\n",
               bb->index, copy_bb->index, count);
    }
  insn = get_insns ();
  end_sequence ();
  emit_insn_before (insn, before);

  /* Redirect all the paths that need no prologue into copy_bb.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
    if (!bitmap_bit_p (need_prologue, e->src->index))
      {
        int freq = EDGE_FREQUENCY (e);
        copy_bb->count += e->count;
        copy_bb->frequency += EDGE_FREQUENCY (e);
        e->dest->count -= e->count;
        if (e->dest->count < 0)
          e->dest->count = 0;
        e->dest->frequency -= freq;
        if (e->dest->frequency < 0)
          e->dest->frequency = 0;
        redirect_edge_and_branch_force (e, copy_bb);
        continue;
      }
    else
      ei_next (&ei);
}
/* Try to perform a kind of shrink-wrapping, making sure the
   prologue/epilogue is emitted only around those parts of the
   function that require it.  */

void
try_shrink_wrapping (edge *entry_edge, edge orig_entry_edge,
                     bitmap_head *bb_flags, rtx_insn *prologue_seq)
{
  edge e;
  edge_iterator ei;
  bool nonempty_prologue = false;
  unsigned max_grow_size;
  rtx_insn *seq;

  for (seq = prologue_seq; seq; seq = NEXT_INSN (seq))
    if (!NOTE_P (seq) || NOTE_KIND (seq) != NOTE_INSN_PROLOGUE_END)
      {
        nonempty_prologue = true;
        break;
      }
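
  /* Shrink-wrap only when the option is enabled, the target provides a
     simple_return pattern, any profiling code runs before the
     prologue, there is a real prologue to move, and the function does
     not use eh_return.  */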
  if (flag_shrink_wrap && HAVE_simple_return
      && (targetm.profile_before_prologue () || !crtl->profile)
      && nonempty_prologue && !crtl->calls_eh_return)
    {
      HARD_REG_SET prologue_clobbered, prologue_used, live_on_edge;
      struct hard_reg_set_container set_up_by_prologue;
      rtx_insn *p_insn;
      vec<basic_block> vec;
      basic_block bb;
      bitmap_head bb_antic_flags;
      bitmap_head bb_on_list;
      bitmap_head bb_tail;

      if (dump_file)
        fprintf (dump_file, "Attempting shrink-wrapping optimization.\n");

      /* Compute the registers set and used in the prologue.  */
      CLEAR_HARD_REG_SET (prologue_clobbered);
      CLEAR_HARD_REG_SET (prologue_used);
      for (p_insn = prologue_seq; p_insn; p_insn = NEXT_INSN (p_insn))
        {
          HARD_REG_SET this_used;
          if (!NONDEBUG_INSN_P (p_insn))
            continue;

          CLEAR_HARD_REG_SET (this_used);
          note_uses (&PATTERN (p_insn), record_hard_reg_uses, &this_used);
          AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered);
          IOR_HARD_REG_SET (prologue_used, this_used);
          note_stores (PATTERN (p_insn), record_hard_reg_sets,
                       &prologue_clobbered);
        }

      prepare_shrink_wrap ((*entry_edge)->dest);

      bitmap_initialize (&bb_antic_flags, &bitmap_default_obstack);
      bitmap_initialize (&bb_on_list, &bitmap_default_obstack);
      bitmap_initialize (&bb_tail, &bitmap_default_obstack);

      /* Find the set of basic blocks that require a stack frame,
         and blocks that are too big to be duplicated.  */

      vec.create (n_basic_blocks_for_fn (cfun));

      CLEAR_HARD_REG_SET (set_up_by_prologue.set);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                           STACK_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM);
      if (frame_pointer_needed)
        add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                             HARD_FRAME_POINTER_REGNUM);
      if (pic_offset_table_rtx
          && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
        add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                             PIC_OFFSET_TABLE_REGNUM);
      if (crtl->drap_reg)
        add_to_hard_reg_set (&set_up_by_prologue.set,
                             GET_MODE (crtl->drap_reg),
                             REGNO (crtl->drap_reg));
      if (targetm.set_up_by_prologue)
        targetm.set_up_by_prologue (&set_up_by_prologue);

      /* We don't use a different max size depending on
         optimize_bb_for_speed_p because increasing shrink-wrapping
         opportunities by duplicating tail blocks can actually result
         in an overall decrease in code size.  */
      max_grow_size = get_uncond_jump_length ();
      max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);
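
      /* MAX_GROW_SIZE thus caps each duplicated tail at roughly
         max-grow-copy-bb-insns unconditional jumps' worth of code,
         measured with get_attr_min_length below.  */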
      FOR_EACH_BB_FN (bb, cfun)
        {
          rtx_insn *insn;
          unsigned size = 0;

          FOR_BB_INSNS (bb, insn)
            if (NONDEBUG_INSN_P (insn))
              {
                if (requires_stack_frame_p (insn, prologue_used,
                                            set_up_by_prologue.set))
                  {
                    if (bb == (*entry_edge)->dest)
                      goto fail_shrinkwrap;
                    bitmap_set_bit (bb_flags, bb->index);
                    vec.quick_push (bb);
                    break;
                  }
                else if (size <= max_grow_size)
                  {
                    size += get_attr_min_length (insn);
                    if (size > max_grow_size)
                      bitmap_set_bit (&bb_on_list, bb->index);
                  }
              }
        }

      /* Blocks that really need a prologue, or are too big for tails.  */
      bitmap_ior_into (&bb_on_list, bb_flags);

      /* For every basic block that needs a prologue, mark all blocks
         reachable from it, so as to ensure they are also seen as
         requiring a prologue.  */
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();

          FOR_EACH_EDGE (e, ei, tmp_bb->succs)
            if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
                && bitmap_set_bit (bb_flags, e->dest->index))
              vec.quick_push (e->dest);
        }

      /* Find the set of basic blocks that need no prologue, have a
         single successor, can be duplicated, meet a max size
         requirement, and go to the exit via like blocks.  */
      vec.quick_push (EXIT_BLOCK_PTR_FOR_FN (cfun));
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();

          FOR_EACH_EDGE (e, ei, tmp_bb->preds)
            if (single_succ_p (e->src)
                && !bitmap_bit_p (&bb_on_list, e->src->index)
                && can_duplicate_block_p (e->src))
              {
                edge pe;
                edge_iterator pei;

                /* If there is a predecessor of e->src which doesn't
                   need a prologue and the edge is complex, we might
                   not be able to redirect the branch to a copy of
                   e->src.  */
                FOR_EACH_EDGE (pe, pei, e->src->preds)
                  if ((pe->flags & EDGE_COMPLEX) != 0
                      && !bitmap_bit_p (bb_flags, pe->src->index))
                    break;
                if (pe == NULL && bitmap_set_bit (&bb_tail, e->src->index))
                  vec.quick_push (e->src);
              }
        }

      /* Now walk backwards from every block that is marked as needing
         a prologue to compute the bb_antic_flags bitmap.  Exclude
         tail blocks; they can be duplicated to be used on paths not
         needing a prologue.  */
      bitmap_clear (&bb_on_list);
      bitmap_and_compl (&bb_antic_flags, bb_flags, &bb_tail);
      FOR_EACH_BB_FN (bb, cfun)
        {
          if (!bitmap_bit_p (&bb_antic_flags, bb->index))
            continue;
          FOR_EACH_EDGE (e, ei, bb->preds)
            if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
                && bitmap_set_bit (&bb_on_list, e->src->index))
              vec.quick_push (e->src);
        }
      while (!vec.is_empty ())
        {
          basic_block tmp_bb = vec.pop ();
          bool all_set = true;

          bitmap_clear_bit (&bb_on_list, tmp_bb->index);
          FOR_EACH_EDGE (e, ei, tmp_bb->succs)
            if (!bitmap_bit_p (&bb_antic_flags, e->dest->index))
              {
                all_set = false;
                break;
              }

          if (all_set)
            {
              bitmap_set_bit (&bb_antic_flags, tmp_bb->index);
              FOR_EACH_EDGE (e, ei, tmp_bb->preds)
                if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
                    && bitmap_set_bit (&bb_on_list, e->src->index))
                  vec.quick_push (e->src);
            }
        }
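
      /* At this point a block is in BB_ANTIC_FLAGS exactly when every
         path from it to the exit passes through a block that needs the
         prologue, i.e. the prologue is anticipatable there.  The best
         place for the prologue is then an edge from a non-ANTIC block
         into an ANTIC block, found next.  */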
      /* Find exactly one edge that leads to a block in ANTIC from
         a block that isn't.  */
      if (!bitmap_bit_p (&bb_antic_flags, (*entry_edge)->dest->index))
        FOR_EACH_BB_FN (bb, cfun)
          {
            if (!bitmap_bit_p (&bb_antic_flags, bb->index))
              continue;
            FOR_EACH_EDGE (e, ei, bb->preds)
              if (!bitmap_bit_p (&bb_antic_flags, e->src->index))
                {
                  if (*entry_edge != orig_entry_edge)
                    {
                      *entry_edge = orig_entry_edge;
                      if (dump_file)
                        fprintf (dump_file, "More than one candidate edge.\n");
                      goto fail_shrinkwrap;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Found candidate edge for "
                             "shrink-wrapping, %d->%d.\n", e->src->index,
                             e->dest->index);
                  *entry_edge = e;
                }
          }

      if (*entry_edge != orig_entry_edge)
        {
          /* Test whether the prologue is known to clobber any register
             (other than FP or SP) which is live on the edge.  */
          CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM);
          if (frame_pointer_needed)
            CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM);
          REG_SET_TO_HARD_REG_SET (live_on_edge,
                                   df_get_live_in ((*entry_edge)->dest));
          if (hard_reg_set_intersect_p (live_on_edge, prologue_clobbered))
            {
              *entry_edge = orig_entry_edge;
              if (dump_file)
                fprintf (dump_file,
                         "Shrink-wrapping aborted due to clobber.\n");
            }
        }
      if (*entry_edge != orig_entry_edge)
        {
          crtl->shrink_wrapped = true;
          if (dump_file)
            fprintf (dump_file, "Performing shrink-wrapping.\n");

          /* Find tail blocks reachable from both blocks needing a
             prologue and blocks not needing a prologue.  */
          if (!bitmap_empty_p (&bb_tail))
            FOR_EACH_BB_FN (bb, cfun)
              {
                bool some_pro, some_no_pro;
                if (!bitmap_bit_p (&bb_tail, bb->index))
                  continue;
                some_pro = some_no_pro = false;
                FOR_EACH_EDGE (e, ei, bb->preds)
                  {
                    if (bitmap_bit_p (bb_flags, e->src->index))
                      some_pro = true;
                    else
                      some_no_pro = true;
                  }
                if (some_pro && some_no_pro)
                  vec.quick_push (bb);
                else
                  bitmap_clear_bit (&bb_tail, bb->index);
              }

          /* Find the head of each tail.  */
          while (!vec.is_empty ())
            {
              basic_block tbb = vec.pop ();

              if (!bitmap_bit_p (&bb_tail, tbb->index))
                continue;

              while (single_succ_p (tbb))
                {
                  tbb = single_succ (tbb);
                  bitmap_clear_bit (&bb_tail, tbb->index);
                }
            }
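
          /* After this walk only the topmost block of each tail chain
             remains in BB_TAIL; everything downstream of a head has
             been cleared, since duplicating the head duplicates the
             whole single-successor chain below it.  */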
          /* Now duplicate the tails.  */
          if (!bitmap_empty_p (&bb_tail))
            FOR_EACH_BB_REVERSE_FN (bb, cfun)
              {
                basic_block copy_bb, tbb;
                rtx_insn *insert_point;
                int eflags;

                if (!bitmap_clear_bit (&bb_tail, bb->index))
                  continue;

                /* Create a copy of BB, instructions and all, for
                   use on paths that don't need a prologue.
                   Ideal placement of the copy is on a fall-thru edge
                   or after a block that would jump to the copy.  */
                FOR_EACH_EDGE (e, ei, bb->preds)
                  if (!bitmap_bit_p (bb_flags, e->src->index)
                      && single_succ_p (e->src))
                    break;
                if (e)
                  {
                    /* Make sure we insert after any barriers.  */
                    rtx_insn *end = get_last_bb_insn (e->src);
                    copy_bb = create_basic_block (NEXT_INSN (end),
                                                  NULL_RTX, e->src);
                    BB_COPY_PARTITION (copy_bb, e->src);
                  }
                else
                  {
                    /* Otherwise put the copy at the end of the function.  */
                    copy_bb = create_basic_block (NULL_RTX, NULL_RTX,
                                                  EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
                    BB_COPY_PARTITION (copy_bb, bb);
                  }

                insert_point = emit_note_after (NOTE_INSN_DELETED,
                                                BB_END (copy_bb));
                emit_barrier_after (BB_END (copy_bb));

                tbb = bb;
                while (1)
                  {
                    dup_block_and_redirect (tbb, copy_bb, insert_point,
                                            bb_flags);
                    tbb = single_succ (tbb);
                    if (tbb == EXIT_BLOCK_PTR_FOR_FN (cfun))
                      break;
                    e = split_block (copy_bb, PREV_INSN (insert_point));
                    copy_bb = e->dest;
                  }

                /* Quiet verify_flow_info by (ab)using EDGE_FAKE.
                   We have yet to add a simple_return to the tails,
                   as we'd like to first convert_jumps_to_returns in
                   case the block is no longer used after that.  */
                eflags = EDGE_FAKE;
                if (CALL_P (PREV_INSN (insert_point))
                    && SIBLING_CALL_P (PREV_INSN (insert_point)))
                  eflags = EDGE_SIBCALL | EDGE_ABNORMAL;
                make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
                                       eflags);

                /* verify_flow_info doesn't like a note after a
                   sibling call.  */
                delete_insn (insert_point);
                if (bitmap_empty_p (&bb_tail))
                  break;
              }
        }

    fail_shrinkwrap:
      bitmap_clear (&bb_tail);
      bitmap_clear (&bb_antic_flags);
      bitmap_clear (&bb_on_list);
      vec.release ();
    }
}
/* If we're allowed to generate a simple return instruction, then by
   definition we don't need a full epilogue.  If the last basic
   block before the exit block does not contain active instructions,
   examine its predecessors and try to emit (conditional) return
   instructions.  */

edge
get_unconverted_simple_return (edge exit_fallthru_edge, bitmap_head bb_flags,
                               vec<edge> *unconverted_simple_returns,
                               rtx_insn **returnjump)
{
  if (optimize)
    {
      unsigned i, last;

      /* convert_jumps_to_returns may add to preds of the exit block
         (but won't remove).  Stop at end of current preds.  */
      last = EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
      for (i = 0; i < last; i++)
        {
          edge e = EDGE_I (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, i);
          if (LABEL_P (BB_HEAD (e->src))
              && !bitmap_bit_p (&bb_flags, e->src->index)
              && !active_insn_between (BB_HEAD (e->src), BB_END (e->src)))
            *unconverted_simple_returns
              = convert_jumps_to_returns (e->src, true,
                                          *unconverted_simple_returns);
        }
    }

  if (exit_fallthru_edge != NULL
      && EDGE_COUNT (exit_fallthru_edge->src->preds) != 0
      && !bitmap_bit_p (&bb_flags, exit_fallthru_edge->src->index))
    {
      basic_block last_bb;

      last_bb = emit_return_for_exit (exit_fallthru_edge, true);
      *returnjump = BB_END (last_bb);
      exit_fallthru_edge = NULL;
    }
  return exit_fallthru_edge;
}
/* If there were branches to an empty LAST_BB which we tried to
   convert to conditional simple_returns, but couldn't for some
   reason, create a block to hold a simple_return insn and redirect
   those remaining edges.  */

void
convert_to_simple_return (edge entry_edge, edge orig_entry_edge,
                          bitmap_head bb_flags, rtx_insn *returnjump,
                          vec<edge> unconverted_simple_returns)
{
  edge e;
  edge_iterator ei;

  if (!unconverted_simple_returns.is_empty ())
    {
      basic_block simple_return_block_hot = NULL;
      basic_block simple_return_block_cold = NULL;
      edge pending_edge_hot = NULL;
      edge pending_edge_cold = NULL;
      basic_block exit_pred;
      int i;

      gcc_assert (entry_edge != orig_entry_edge);

      /* See if we can reuse the last insn that was emitted for the
         epilogue.  */
      if (returnjump != NULL_RTX
          && JUMP_LABEL (returnjump) == simple_return_rtx)
        {
          e = split_block (BLOCK_FOR_INSN (returnjump), PREV_INSN (returnjump));
          if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
            simple_return_block_hot = e->dest;
          else
            simple_return_block_cold = e->dest;
        }

      /* Also check returns we might need to add to tail blocks.  */
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
        if (EDGE_COUNT (e->src->preds) != 0
            && (e->flags & EDGE_FAKE) != 0
            && !bitmap_bit_p (&bb_flags, e->src->index))
          {
            if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
              pending_edge_hot = e;
            else
              pending_edge_cold = e;
          }

      /* Save a pointer to the exit's predecessor BB for use in
         inserting new BBs at the end of the function.  Do this
         after the call to split_block above which may split
         the original exit pred.  */
      exit_pred = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;

      FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
        {
          basic_block *pdest_bb;
          edge pending;

          if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
            {
              pdest_bb = &simple_return_block_hot;
              pending = pending_edge_hot;
            }
          else
            {
              pdest_bb = &simple_return_block_cold;
              pending = pending_edge_cold;
            }

          if (*pdest_bb == NULL && pending != NULL)
            {
              emit_return_into_block (true, pending->src);
              pending->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
              *pdest_bb = pending->src;
            }
          else if (*pdest_bb == NULL)
            {
              basic_block bb;
              rtx_insn *start;

              bb = create_basic_block (NULL, NULL, exit_pred);
              BB_COPY_PARTITION (bb, e->src);
              start = emit_jump_insn_after (gen_simple_return (),
                                            BB_END (bb));
              JUMP_LABEL (start) = simple_return_rtx;
              emit_barrier_after (start);

              *pdest_bb = bb;
              make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
            }
          redirect_edge_and_branch_force (e, *pdest_bb);
        }
      unconverted_simple_returns.release ();
    }

  if (entry_edge != orig_entry_edge)
    {
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
        if (EDGE_COUNT (e->src->preds) != 0
            && (e->flags & EDGE_FAKE) != 0
            && !bitmap_bit_p (&bb_flags, e->src->index))
          {
            emit_return_into_block (true, e->src);
            e->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
          }
    }
}
#endif