/* Shrink-wrapping related optimizations.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file handles shrink-wrapping related optimizations.  */
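
/* Overview: shrink-wrapping moves the prologue and epilogue away from
   the function boundaries so that they execute only on paths that
   actually need the stack frame.  A minimal illustrative sketch
   (hypothetical source, assuming a target with a simple_return pattern):

     int f (int x, int *p)
     {
       if (x == 0)
	 return -1;	   // fast path: no frame needed
       return g (p) + x;  // slow path: the call forces the prologue
     }

   The aim is to emit the prologue only on the path containing the call
   to g, so that the early-exit path performs no frame setup at all.  */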
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl-error.h"
#include "tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "stringpool.h"
#include "flags.h"
#include "except.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "libfuncs.h"
#include "regs.h"
#include "insn-config.h"
#include "recog.h"
#include "output.h"
#include "tm_p.h"
#include "langhooks.h"
#include "target.h"
#include "common/common-target.h"
#include "gimple-expr.h"
#include "gimplify.h"
#include "tree-pass.h"
#include "predict.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "basic-block.h"
#include "df.h"
#include "params.h"
#include "bb-reorder.h"
#include "shrink-wrap.h"
#include "regcprop.h"
#include "rtl-iter.h"

#ifdef HAVE_simple_return
/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
			HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
	continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg),
			   REGNO (dreg));
    }
  if (hard_reg_set_intersect_p (hardregs, prologue_used))
    return true;
  AND_COMPL_HARD_REG_SET (hardregs, call_used_reg_set);
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (hardregs, regno)
	&& df_regs_ever_live_p (regno))
      return true;

  FOR_EACH_INSN_USE (use, insn)
    {
      rtx reg = DF_REF_REG (use);

      if (!REG_P (reg))
	continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (reg),
			   REGNO (reg));
    }
  if (hard_reg_set_intersect_p (hardregs, set_up_by_prologue))
    return true;

  return false;
}
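
/* The checks above are deliberately conservative.  A sibling call is
   handled entirely by the caller's tail jump and so does not force a
   frame, whereas an ordinary call always does; and an insn writing a
   call-saved register that is live anywhere in the function pins the
   frame, since the prologue would have to save that register.  */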
/* See whether BB has a single live successor edge whose destination
   uses [REGNO, END_REGNO).  Return the live edge if its destination
   block has one or two predecessors.  Otherwise return NULL.  */

static edge
live_edge_for_reg (basic_block bb, int regno, int end_regno)
{
  edge e, live_edge;
  edge_iterator ei;
  bitmap live;
  int i;

  live_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      live = df_get_live_in (e->dest);
      for (i = regno; i < end_regno; i++)
	if (REGNO_REG_SET_P (live, i))
	  {
	    if (live_edge && live_edge != e)
	      return NULL;
	    live_edge = e;
	  }
    }

  /* We can sometimes encounter dead code.  Don't try to move it
     into the exit block.  */
  if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Reject targets of abnormal edges.  This is needed for correctness
     on ports like Alpha and MIPS, whose pic_offset_table_rtx can die on
     exception edges even though it is generally treated as call-saved
     for the majority of the compilation.  Moving across abnormal edges
     isn't going to be interesting for shrink-wrap usage anyway.  */
  if (live_edge->flags & EDGE_ABNORMAL)
    return NULL;

  /* When live_edge->dest has two predecessors, we can create a new
     block on the edge to make it meet the requirement.  */
  if (EDGE_COUNT (live_edge->dest->preds) > 2)
    return NULL;

  return live_edge;
}
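
/* Sketch of the shape this helper accepts (illustrative only): for a
   register R live out of BB,

	    BB
	   /  \
	  e1    e2
	dest1  dest2

   the answer is e1 when R is live into dest1 but not into dest2.  It
   gives up when R is live into both successors, when the only live
   successor is the exit block, when the live edge is abnormal, or when
   dest1 has more than two predecessors; with exactly two, the caller
   can still split e1 to create a suitable single-predecessor block.  */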
/* Try to move INSN from BB to a successor.  Return true on success.
   USES and DEFS are the set of registers that are used and defined
   after INSN in BB.  SPLIT_P indicates whether a live edge from BB
   is split or not.  */

static bool
move_insn_for_shrink_wrap (basic_block bb, rtx_insn *insn,
			   const HARD_REG_SET uses,
			   const HARD_REG_SET defs,
			   bool *split_p)
{
  rtx set, src, dest;
  bitmap live_out, live_in, bb_uses, bb_defs;
  unsigned int i, dregno, end_dregno;
  unsigned int sregno = FIRST_PSEUDO_REGISTER;
  unsigned int end_sregno = FIRST_PSEUDO_REGISTER;
  basic_block next_block;
  edge live_edge;

  /* Look for a simple register assignment.  We don't use single_set here
     because we can't deal with any CLOBBERs, USEs, or REG_UNUSED secondary
     destinations.  */
  if (!INSN_P (insn))
    return false;
  set = PATTERN (insn);
  if (GET_CODE (set) != SET)
    return false;
  src = SET_SRC (set);
  dest = SET_DEST (set);

  /* For the destination, we want only a register.  Also disallow STACK
     or FRAME related adjustments.  They are likely part of the prologue,
     so keep them in the entry block.  */
  if (!REG_P (dest)
      || dest == stack_pointer_rtx
      || dest == frame_pointer_rtx
      || dest == hard_frame_pointer_rtx)
    return false;

  /* For the source, we want one of:
      (1) A (non-overlapping) register
      (2) A constant,
      (3) An expression involving no more than one register.

     That last point comes from the code following, which was originally
     written to handle only register move operations, and still only handles
     a single source register when checking for overlaps.  Happily, the
     same checks can be applied to expressions like (plus reg const).  */

  if (CONSTANT_P (src))
    ;
  else if (!REG_P (src))
    {
      rtx src_inner = NULL_RTX;

      if (can_throw_internal (insn))
	return false;

      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, src, ALL)
	{
	  rtx x = *iter;
	  switch (GET_RTX_CLASS (GET_CODE (x)))
	    {
	    case RTX_CONST_OBJ:
	    case RTX_COMPARE:
	    case RTX_COMM_COMPARE:
	    case RTX_BIN_ARITH:
	    case RTX_COMM_ARITH:
	    case RTX_UNARY:
	    case RTX_TERNARY:
	      /* Constant or expression.  Continue.  */
	      break;

	    case RTX_OBJ:
	    case RTX_EXTRA:
	      switch (GET_CODE (x))
		{
		case UNSPEC:
		case SUBREG:
		case STRICT_LOW_PART:
		case PC:
		case LO_SUM:
		  /* Ok.  Continue.  */
		  break;

		case REG:
		  /* Fail if we see a second inner register.  */
		  if (src_inner != NULL)
		    return false;
		  src_inner = x;
		  break;

		default:
		  return false;
		}
	      break;

	    default:
	      return false;
	    }
	}

      if (src_inner != NULL)
	src = src_inner;
    }

  /* Make sure that the source register isn't defined later in BB.  */
  if (REG_P (src))
    {
      sregno = REGNO (src);
      end_sregno = END_REGNO (src);
      if (overlaps_hard_reg_set_p (defs, GET_MODE (src), sregno))
	return false;
    }

  /* Make sure that the destination register isn't referenced later in BB.  */
  dregno = REGNO (dest);
  end_dregno = END_REGNO (dest);
  if (overlaps_hard_reg_set_p (uses, GET_MODE (dest), dregno)
      || overlaps_hard_reg_set_p (defs, GET_MODE (dest), dregno))
    return false;

  /* See whether there is a successor block to which we could move INSN.  */
  live_edge = live_edge_for_reg (bb, dregno, end_dregno);
  if (!live_edge)
    return false;

  next_block = live_edge->dest;
  /* Create a new basic block on the edge.  */
  if (EDGE_COUNT (next_block->preds) == 2)
    {
      /* split_edge for a block with only one successor is meaningless.  */
      if (EDGE_COUNT (bb->succs) == 1)
	return false;

      /* If DF_LIVE doesn't exist, i.e. at -O1, just give up.  */
      if (!df_live)
	return false;

      basic_block old_dest = live_edge->dest;
      next_block = split_edge (live_edge);

      /* We create a new basic block.  Call df_grow_bb_info to make sure
	 all data structures are allocated.  */
      df_grow_bb_info (df_live);

      bitmap_and (df_get_live_in (next_block), df_get_live_out (bb),
		  df_get_live_in (old_dest));
      df_set_bb_dirty (next_block);

      /* We should not split more than once for a function.  */
      if (*split_p)
	return false;

      *split_p = true;
    }

  /* At this point we are committed to moving INSN, but let's try to
     move it as far as we can.  */
  do
    {
      live_out = df_get_live_out (bb);
      live_in = df_get_live_in (next_block);
      bb = next_block;

      /* Check whether BB uses DEST or clobbers DEST.  We need to add
	 INSN to BB if so.  Either way, DEST is no longer live on entry,
	 except for any part that overlaps SRC (next loop).  */
      bb_uses = &DF_LR_BB_INFO (bb)->use;
      bb_defs = &DF_LR_BB_INFO (bb)->def;
      if (df_live)
	{
	  for (i = dregno; i < end_dregno; i++)
	    {
	      if (*split_p
		  || REGNO_REG_SET_P (bb_uses, i)
		  || REGNO_REG_SET_P (bb_defs, i)
		  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
		next_block = NULL;
	      CLEAR_REGNO_REG_SET (live_out, i);
	      CLEAR_REGNO_REG_SET (live_in, i);
	    }

	  /* Check whether BB clobbers SRC.  We need to add INSN to BB if so.
	     Either way, SRC is now live on entry.  */
	  for (i = sregno; i < end_sregno; i++)
	    {
	      if (*split_p
		  || REGNO_REG_SET_P (bb_defs, i)
		  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
		next_block = NULL;
	      SET_REGNO_REG_SET (live_out, i);
	      SET_REGNO_REG_SET (live_in, i);
	    }
	}
      else
	{
	  /* DF_LR_BB_INFO (bb)->def does not comprise the DF_REF_PARTIAL and
	     DF_REF_CONDITIONAL defs.  So if DF_LIVE doesn't exist, i.e.
	     at -O1, just give up searching NEXT_BLOCK.  */
	  next_block = NULL;
	  for (i = dregno; i < end_dregno; i++)
	    {
	      CLEAR_REGNO_REG_SET (live_out, i);
	      CLEAR_REGNO_REG_SET (live_in, i);
	    }

	  for (i = sregno; i < end_sregno; i++)
	    {
	      SET_REGNO_REG_SET (live_out, i);
	      SET_REGNO_REG_SET (live_in, i);
	    }
	}

      /* If we don't need to add the move to BB, look for a single
	 successor block.  */
      if (next_block)
	{
	  live_edge = live_edge_for_reg (next_block, dregno, end_dregno);
	  if (!live_edge || EDGE_COUNT (live_edge->dest->preds) > 1)
	    break;
	  next_block = live_edge->dest;
	}
    }
  while (next_block);

  /* For the newly created basic block, there is no dataflow info at all,
     so skip the following dataflow update and check.  */
  if (!(*split_p))
    {
      /* BB now defines DEST.  It only uses the parts of DEST that overlap SRC
	 (next loop).  */
      for (i = dregno; i < end_dregno; i++)
	{
	  CLEAR_REGNO_REG_SET (bb_uses, i);
	  SET_REGNO_REG_SET (bb_defs, i);
	}

      /* BB now uses SRC.  */
      for (i = sregno; i < end_sregno; i++)
	SET_REGNO_REG_SET (bb_uses, i);
    }

  emit_insn_after (PATTERN (insn), bb_note (bb));
  delete_insn (insn);
  return true;
}
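
/* Worked example (illustrative, x86-64 flavored register names): given
   an entry block containing

     (insn (set (reg:DI r12) (reg:DI di)))   ;; save incoming argument
     (jump_insn ...)			     ;; conditional branch

   where r12 is live down only one successor path, the function above
   deletes the copy from the entry block and re-emits its PATTERN at the
   start of that successor, splitting the live edge first if the
   destination has a second predecessor.  The entry block then no longer
   touches the call-saved register r12.  */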
/* Look for register copies in the first block of the function, and move
   them down into successor blocks if the register is used only on one
   path.  This exposes more opportunities for shrink-wrapping.  These
   kinds of sets often occur when incoming argument registers are moved
   to call-saved registers because their values are live across one or
   more calls during the function.  */

void
prepare_shrink_wrap (basic_block entry_block)
{
  rtx_insn *insn, *curr;
  rtx x;
  HARD_REG_SET uses, defs;
  df_ref def, use;
  bool split_p = false;

  if (JUMP_P (BB_END (entry_block)))
    {
      /* To have more shrink-wrapping opportunities, prepare_shrink_wrap tries
	 to sink the copies from parameter registers to callee-saved registers
	 out of the entry block.  copyprop_hardreg_forward_bb_without_debug_insn
	 is called to break some dependences.  */
      copyprop_hardreg_forward_bb_without_debug_insn (entry_block);
    }

  CLEAR_HARD_REG_SET (uses);
  CLEAR_HARD_REG_SET (defs);
  FOR_BB_INSNS_REVERSE_SAFE (entry_block, insn, curr)
    if (NONDEBUG_INSN_P (insn)
	&& !move_insn_for_shrink_wrap (entry_block, insn, uses, defs,
				       &split_p))
      {
	/* Add all defined registers to DEFS.  */
	FOR_EACH_INSN_DEF (def, insn)
	  {
	    x = DF_REF_REG (def);
	    if (REG_P (x) && HARD_REGISTER_P (x))
	      SET_HARD_REG_BIT (defs, REGNO (x));
	  }

	/* Add all used registers to USES.  */
	FOR_EACH_INSN_USE (use, insn)
	  {
	    x = DF_REF_REG (use);
	    if (REG_P (x) && HARD_REGISTER_P (x))
	      SET_HARD_REG_BIT (uses, REGNO (x));
	  }
      }
}
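
/* Source-level sketch of the pattern targeted here (hypothetical code):

     long f (long x)
     {
       if (x < 0)
	 return 0;
       long saved = x;	 // x copied to a call-saved register, since
       g ();		 // its value is live across this call
       return saved;
     }

   Sinking the copy out of the entry block lets the x < 0 path avoid the
   prologue that saving a call-saved register would otherwise require.  */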
/* Create a copy of BB instructions and insert at BEFORE.  Redirect
   preds of BB to COPY_BB if they don't appear in NEED_PROLOGUE.  */
void
dup_block_and_redirect (basic_block bb, basic_block copy_bb, rtx_insn *before,
			bitmap_head *need_prologue)
{
  edge_iterator ei;
  edge e;
  rtx_insn *insn = BB_END (bb);

  /* We know BB has a single successor, so there is no need to copy a
     simple jump at the end of BB.  */
  if (simplejump_p (insn))
    insn = PREV_INSN (insn);

  start_sequence ();
  duplicate_insn_chain (BB_HEAD (bb), insn);
  if (dump_file)
    {
      unsigned count = 0;
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	if (active_insn_p (insn))
	  ++count;
      fprintf (dump_file, "Duplicating bb %d to bb %d, %u active insns.\n",
	       bb->index, copy_bb->index, count);
    }
  insn = get_insns ();
  end_sequence ();
  emit_insn_before (insn, before);

  /* Redirect all the paths that need no prologue into copy_bb.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
    if (!bitmap_bit_p (need_prologue, e->src->index))
      {
	/* Transfer the execution count and frequency carried by the
	   redirected edge from the original block to the copy,
	   clamping at zero.  */
	int freq = EDGE_FREQUENCY (e);
	copy_bb->count += e->count;
	copy_bb->frequency += EDGE_FREQUENCY (e);
	e->dest->count -= e->count;
	if (e->dest->count < 0)
	  e->dest->count = 0;
	e->dest->frequency -= freq;
	if (e->dest->frequency < 0)
	  e->dest->frequency = 0;
	redirect_edge_and_branch_force (e, copy_bb);
	continue;
      }
    else
      ei_next (&ei);
}
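
/* The function below proceeds in phases: (1) compute the registers used
   and set by the prologue; (2) mark every block containing an insn that
   requires a stack frame, then propagate the mark to all blocks reachable
   from a marked block; (3) working back from the exit, collect small
   single-successor "tail" blocks that could be duplicated onto the
   no-prologue paths; (4) compute the set of blocks from which every path
   leads to a block needing the prologue (the bb_antic_flags bitmap);
   (5) require exactly one edge entering that set and make it the new
   prologue insertion point, unless the prologue clobbers a register live
   on that edge; (6) duplicate the shared tails so paths needing the
   prologue and paths not needing it no longer meet.  */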
/* Try to perform a kind of shrink-wrapping, making sure the
   prologue/epilogue is emitted only around those parts of the
   function that require it.  */

void
try_shrink_wrapping (edge *entry_edge, edge orig_entry_edge,
		     bitmap_head *bb_flags, rtx_insn *prologue_seq)
{
  edge e;
  edge_iterator ei;
  bool nonempty_prologue = false;
  unsigned max_grow_size;
  rtx_insn *seq;

  for (seq = prologue_seq; seq; seq = NEXT_INSN (seq))
    if (!NOTE_P (seq) || NOTE_KIND (seq) != NOTE_INSN_PROLOGUE_END)
      {
	nonempty_prologue = true;
	break;
      }

  if (flag_shrink_wrap && HAVE_simple_return
      && (targetm.profile_before_prologue () || !crtl->profile)
      && nonempty_prologue && !crtl->calls_eh_return)
    {
      HARD_REG_SET prologue_clobbered, prologue_used, live_on_edge;
      struct hard_reg_set_container set_up_by_prologue;
      rtx_insn *p_insn;
      vec<basic_block> vec;
      basic_block bb;
      bitmap_head bb_antic_flags;
      bitmap_head bb_on_list;
      bitmap_head bb_tail;

      if (dump_file)
	fprintf (dump_file, "Attempting shrink-wrapping optimization.\n");

      /* Compute the registers set and used in the prologue.  */
      CLEAR_HARD_REG_SET (prologue_clobbered);
      CLEAR_HARD_REG_SET (prologue_used);
      for (p_insn = prologue_seq; p_insn; p_insn = NEXT_INSN (p_insn))
	{
	  HARD_REG_SET this_used;
	  if (!NONDEBUG_INSN_P (p_insn))
	    continue;

	  CLEAR_HARD_REG_SET (this_used);
	  note_uses (&PATTERN (p_insn), record_hard_reg_uses,
		     &this_used);
	  AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered);
	  IOR_HARD_REG_SET (prologue_used, this_used);
	  note_stores (PATTERN (p_insn), record_hard_reg_sets,
		       &prologue_clobbered);
	}

      prepare_shrink_wrap ((*entry_edge)->dest);

      bitmap_initialize (&bb_antic_flags, &bitmap_default_obstack);
      bitmap_initialize (&bb_on_list, &bitmap_default_obstack);
      bitmap_initialize (&bb_tail, &bitmap_default_obstack);

      /* Find the set of basic blocks that require a stack frame,
	 and blocks that are too big to be duplicated.  */

      vec.create (n_basic_blocks_for_fn (cfun));

      CLEAR_HARD_REG_SET (set_up_by_prologue.set);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
			   STACK_POINTER_REGNUM);
      add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM);
      if (frame_pointer_needed)
	add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
			     HARD_FRAME_POINTER_REGNUM);
      if (pic_offset_table_rtx
	  && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
	add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
			     PIC_OFFSET_TABLE_REGNUM);
      if (crtl->drap_reg)
	add_to_hard_reg_set (&set_up_by_prologue.set,
			     GET_MODE (crtl->drap_reg),
			     REGNO (crtl->drap_reg));
      if (targetm.set_up_by_prologue)
	targetm.set_up_by_prologue (&set_up_by_prologue);

      /* We don't use a different max size depending on
	 optimize_bb_for_speed_p because increasing shrink-wrapping
	 opportunities by duplicating tail blocks can actually result
	 in an overall decrease in code size.  */
      max_grow_size = get_uncond_jump_length ();
      max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);

      FOR_EACH_BB_FN (bb, cfun)
	{
	  rtx_insn *insn;
	  unsigned size = 0;

	  FOR_BB_INSNS (bb, insn)
	    if (NONDEBUG_INSN_P (insn))
	      {
		if (requires_stack_frame_p (insn, prologue_used,
					    set_up_by_prologue.set))
		  {
		    if (bb == (*entry_edge)->dest)
		      goto fail_shrinkwrap;
		    bitmap_set_bit (bb_flags, bb->index);
		    vec.quick_push (bb);
		    break;
		  }
		else if (size <= max_grow_size)
		  {
		    size += get_attr_min_length (insn);
		    if (size > max_grow_size)
		      bitmap_set_bit (&bb_on_list, bb->index);
		  }
	      }
	}

      /* Blocks that really need a prologue, or are too big for tails.  */
      bitmap_ior_into (&bb_on_list, bb_flags);

      /* For every basic block that needs a prologue, mark all blocks
	 reachable from it, so as to ensure they are also seen as
	 requiring a prologue.  */
      while (!vec.is_empty ())
	{
	  basic_block tmp_bb = vec.pop ();

	  FOR_EACH_EDGE (e, ei, tmp_bb->succs)
	    if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
		&& bitmap_set_bit (bb_flags, e->dest->index))
	      vec.quick_push (e->dest);
	}

      /* Find the set of basic blocks that need no prologue, have a
	 single successor, can be duplicated, meet a max size
	 requirement, and go to the exit via like blocks.  */
      vec.quick_push (EXIT_BLOCK_PTR_FOR_FN (cfun));
      while (!vec.is_empty ())
	{
	  basic_block tmp_bb = vec.pop ();

	  FOR_EACH_EDGE (e, ei, tmp_bb->preds)
	    if (single_succ_p (e->src)
		&& !bitmap_bit_p (&bb_on_list, e->src->index)
		&& can_duplicate_block_p (e->src))
	      {
		edge pe;
		edge_iterator pei;

		/* If there is a predecessor of e->src which doesn't
		   need a prologue and the edge is complex, we might
		   not be able to redirect the branch to a copy of
		   e->src.  */
		FOR_EACH_EDGE (pe, pei, e->src->preds)
		  if ((pe->flags & EDGE_COMPLEX) != 0
		      && !bitmap_bit_p (bb_flags, pe->src->index))
		    break;
		if (pe == NULL && bitmap_set_bit (&bb_tail, e->src->index))
		  vec.quick_push (e->src);
	      }
	}

      /* Now walk backwards from every block that is marked as needing
	 a prologue to compute the bb_antic_flags bitmap.  Exclude
	 tail blocks; they can be duplicated to be used on paths not
	 needing a prologue.  */
      bitmap_clear (&bb_on_list);
      bitmap_and_compl (&bb_antic_flags, bb_flags, &bb_tail);
      FOR_EACH_BB_FN (bb, cfun)
	{
	  if (!bitmap_bit_p (&bb_antic_flags, bb->index))
	    continue;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
		&& bitmap_set_bit (&bb_on_list, e->src->index))
	      vec.quick_push (e->src);
	}
      while (!vec.is_empty ())
	{
	  basic_block tmp_bb = vec.pop ();
	  bool all_set = true;

	  bitmap_clear_bit (&bb_on_list, tmp_bb->index);
	  FOR_EACH_EDGE (e, ei, tmp_bb->succs)
	    if (!bitmap_bit_p (&bb_antic_flags, e->dest->index))
	      {
		all_set = false;
		break;
	      }

	  if (all_set)
	    {
	      bitmap_set_bit (&bb_antic_flags, tmp_bb->index);
	      FOR_EACH_EDGE (e, ei, tmp_bb->preds)
		if (!bitmap_bit_p (&bb_antic_flags, e->src->index)
		    && bitmap_set_bit (&bb_on_list, e->src->index))
		  vec.quick_push (e->src);
	    }
	}
      /* Find exactly one edge that leads to a block in ANTIC from
	 a block that isn't.  */
      if (!bitmap_bit_p (&bb_antic_flags, (*entry_edge)->dest->index))
	FOR_EACH_BB_FN (bb, cfun)
	  {
	    if (!bitmap_bit_p (&bb_antic_flags, bb->index))
	      continue;
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      if (!bitmap_bit_p (&bb_antic_flags, e->src->index))
		{
		  if (*entry_edge != orig_entry_edge)
		    {
		      *entry_edge = orig_entry_edge;
		      if (dump_file)
			fprintf (dump_file, "More than one candidate edge.\n");
		      goto fail_shrinkwrap;
		    }
		  if (dump_file)
		    fprintf (dump_file, "Found candidate edge for "
			     "shrink-wrapping, %d->%d.\n", e->src->index,
			     e->dest->index);
		  *entry_edge = e;
		}
	  }

      if (*entry_edge != orig_entry_edge)
	{
	  /* Test whether the prologue is known to clobber any register
	     (other than FP or SP) that is live on the edge.  */
	  CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM);
	  if (frame_pointer_needed)
	    CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM);
	  REG_SET_TO_HARD_REG_SET (live_on_edge,
				   df_get_live_in ((*entry_edge)->dest));
	  if (hard_reg_set_intersect_p (live_on_edge, prologue_clobbered))
	    {
	      *entry_edge = orig_entry_edge;
	      if (dump_file)
		fprintf (dump_file,
			 "Shrink-wrapping aborted due to clobber.\n");
	    }
	}
      if (*entry_edge != orig_entry_edge)
	{
	  crtl->shrink_wrapped = true;
	  if (dump_file)
	    fprintf (dump_file, "Performing shrink-wrapping.\n");

	  /* Find tail blocks reachable from both blocks needing a
	     prologue and blocks not needing a prologue.  */
	  if (!bitmap_empty_p (&bb_tail))
	    FOR_EACH_BB_FN (bb, cfun)
	      {
		bool some_pro, some_no_pro;
		if (!bitmap_bit_p (&bb_tail, bb->index))
		  continue;
		some_pro = some_no_pro = false;
		FOR_EACH_EDGE (e, ei, bb->preds)
		  {
		    if (bitmap_bit_p (bb_flags, e->src->index))
		      some_pro = true;
		    else
		      some_no_pro = true;
		  }
		if (some_pro && some_no_pro)
		  vec.quick_push (bb);
		else
		  bitmap_clear_bit (&bb_tail, bb->index);
	      }
	  /* Find the head of each tail.  */
	  while (!vec.is_empty ())
	    {
	      basic_block tbb = vec.pop ();

	      if (!bitmap_bit_p (&bb_tail, tbb->index))
		continue;

	      while (single_succ_p (tbb))
		{
		  tbb = single_succ (tbb);
		  bitmap_clear_bit (&bb_tail, tbb->index);
		}
	    }
	  /* Now duplicate the tails.  */
	  if (!bitmap_empty_p (&bb_tail))
	    FOR_EACH_BB_REVERSE_FN (bb, cfun)
	      {
		basic_block copy_bb, tbb;
		rtx_insn *insert_point;
		int eflags;

		if (!bitmap_clear_bit (&bb_tail, bb->index))
		  continue;

		/* Create a copy of BB, instructions and all, for
		   use on paths that don't need a prologue.
		   Ideal placement of the copy is on a fall-thru edge
		   or after a block that would jump to the copy.  */
		FOR_EACH_EDGE (e, ei, bb->preds)
		  if (!bitmap_bit_p (bb_flags, e->src->index)
		      && single_succ_p (e->src))
		    break;
		if (e)
		  {
		    /* Make sure we insert after any barriers.  */
		    rtx_insn *end = get_last_bb_insn (e->src);
		    copy_bb = create_basic_block (NEXT_INSN (end),
						  NULL_RTX, e->src);
		    BB_COPY_PARTITION (copy_bb, e->src);
		  }
		else
		  {
		    /* Otherwise put the copy at the end of the function.  */
		    copy_bb = create_basic_block (NULL_RTX, NULL_RTX,
						  EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
		    BB_COPY_PARTITION (copy_bb, bb);
		  }

		insert_point = emit_note_after (NOTE_INSN_DELETED,
						BB_END (copy_bb));
		emit_barrier_after (BB_END (copy_bb));

		tbb = bb;
		while (1)
		  {
		    dup_block_and_redirect (tbb, copy_bb, insert_point,
					    bb_flags);
		    tbb = single_succ (tbb);
		    if (tbb == EXIT_BLOCK_PTR_FOR_FN (cfun))
		      break;
		    e = split_block (copy_bb, PREV_INSN (insert_point));
		    copy_bb = e->dest;
		  }

		/* Quiet verify_flow_info by (ab)using EDGE_FAKE.
		   We have yet to add a simple_return to the tails,
		   as we'd like to first convert_jumps_to_returns in
		   case the block is no longer used after that.  */
		eflags = EDGE_FAKE;
		if (CALL_P (PREV_INSN (insert_point))
		    && SIBLING_CALL_P (PREV_INSN (insert_point)))
		  eflags = EDGE_SIBCALL | EDGE_ABNORMAL;
		make_single_succ_edge (copy_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
				       eflags);

		/* verify_flow_info doesn't like a note after a
		   sibling call.  */
		delete_insn (insert_point);
		if (bitmap_empty_p (&bb_tail))
		  break;
	      }
	}

    fail_shrinkwrap:
      bitmap_clear (&bb_tail);
      bitmap_clear (&bb_antic_flags);
      bitmap_clear (&bb_on_list);
      vec.release ();
    }
}
/* If we're allowed to generate a simple return instruction, then by
   definition we don't need a full epilogue.  If the last basic
   block before the exit block does not contain active instructions,
   examine its predecessors and try to emit (conditional) return
   instructions.  */

edge
get_unconverted_simple_return (edge exit_fallthru_edge, bitmap_head bb_flags,
			       vec<edge> *unconverted_simple_returns,
			       rtx_insn **returnjump)
{
  if (optimize)
    {
      unsigned i, last;

      /* convert_jumps_to_returns may add to preds of the exit block
	 (but won't remove).  Stop at end of current preds.  */
      last = EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
      for (i = 0; i < last; i++)
	{
	  edge e = EDGE_I (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, i);
	  if (LABEL_P (BB_HEAD (e->src))
	      && !bitmap_bit_p (&bb_flags, e->src->index)
	      && !active_insn_between (BB_HEAD (e->src), BB_END (e->src)))
	    *unconverted_simple_returns
	      = convert_jumps_to_returns (e->src, true,
					  *unconverted_simple_returns);
	}
    }

  if (exit_fallthru_edge != NULL
      && EDGE_COUNT (exit_fallthru_edge->src->preds) != 0
      && !bitmap_bit_p (&bb_flags, exit_fallthru_edge->src->index))
    {
      basic_block last_bb;

      last_bb = emit_return_for_exit (exit_fallthru_edge, true);
      *returnjump = BB_END (last_bb);
      exit_fallthru_edge = NULL;
    }
  return exit_fallthru_edge;
}
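
/* Any jump that convert_jumps_to_returns could not rewrite into a
   (conditional) return ends up in UNCONVERTED_SIMPLE_RETURNS; the
   function below gives those edges a landing block holding an explicit
   simple_return insn, keeping at most one such block per hot/cold
   partition.  */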
/* If there were branches to an empty LAST_BB which we tried to
   convert to conditional simple_returns, but couldn't for some
   reason, create a block to hold a simple_return insn and redirect
   those remaining edges.  */

void
convert_to_simple_return (edge entry_edge, edge orig_entry_edge,
			  bitmap_head bb_flags, rtx_insn *returnjump,
			  vec<edge> unconverted_simple_returns)
{
  edge e;
  edge_iterator ei;

  if (!unconverted_simple_returns.is_empty ())
    {
      basic_block simple_return_block_hot = NULL;
      basic_block simple_return_block_cold = NULL;
      edge pending_edge_hot = NULL;
      edge pending_edge_cold = NULL;
      basic_block exit_pred;
      int i;

      gcc_assert (entry_edge != orig_entry_edge);

      /* See if we can reuse the last insn that was emitted for the
	 epilogue.  */
      if (returnjump != NULL_RTX
	  && JUMP_LABEL (returnjump) == simple_return_rtx)
	{
	  e = split_block (BLOCK_FOR_INSN (returnjump), PREV_INSN (returnjump));
	  if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
	    simple_return_block_hot = e->dest;
	  else
	    simple_return_block_cold = e->dest;
	}

      /* Also check returns we might need to add to tail blocks.  */
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
	if (EDGE_COUNT (e->src->preds) != 0
	    && (e->flags & EDGE_FAKE) != 0
	    && !bitmap_bit_p (&bb_flags, e->src->index))
	  {
	    if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
	      pending_edge_hot = e;
	    else
	      pending_edge_cold = e;
	  }

      /* Save a pointer to the exit's predecessor BB for use in
	 inserting new BBs at the end of the function.  Do this
	 after the call to split_block above which may split
	 the original exit pred.  */
      exit_pred = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;

      FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
	{
	  basic_block *pdest_bb;
	  edge pending;

	  if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
	    {
	      pdest_bb = &simple_return_block_hot;
	      pending = pending_edge_hot;
	    }
	  else
	    {
	      pdest_bb = &simple_return_block_cold;
	      pending = pending_edge_cold;
	    }

	  if (*pdest_bb == NULL && pending != NULL)
	    {
	      emit_return_into_block (true, pending->src);
	      pending->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
	      *pdest_bb = pending->src;
	    }
	  else if (*pdest_bb == NULL)
	    {
	      basic_block bb;
	      rtx_insn *start;

	      bb = create_basic_block (NULL, NULL, exit_pred);
	      BB_COPY_PARTITION (bb, e->src);
	      start = emit_jump_insn_after (gen_simple_return (),
					    BB_END (bb));
	      JUMP_LABEL (start) = simple_return_rtx;
	      emit_barrier_after (start);

	      *pdest_bb = bb;
	      make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
	    }
	  redirect_edge_and_branch_force (e, *pdest_bb);
	}
      unconverted_simple_returns.release ();
    }

  if (entry_edge != orig_entry_edge)
    {
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
	if (EDGE_COUNT (e->src->preds) != 0
	    && (e->flags & EDGE_FAKE) != 0
	    && !bitmap_bit_p (&bb_flags, e->src->index))
	  {
	    emit_return_into_block (true, e->src);
	    e->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
	  }
    }
}
#endif