gcc/shrink-wrap.c
/* Shrink-wrapping related optimizations.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file handles shrink-wrapping related optimizations.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "rtl.h"
#include "df.h"
#include "rtl-error.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "stringpool.h"
#include "flags.h"
#include "except.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "libfuncs.h"
#include "regs.h"
#include "recog.h"
#include "output.h"
#include "tm_p.h"
#include "langhooks.h"
#include "target.h"
#include "common/common-target.h"
#include "gimple-expr.h"
#include "gimplify.h"
#include "tree-pass.h"
#include "cfgrtl.h"
#include "params.h"
#include "bb-reorder.h"
#include "shrink-wrap.h"
#include "regcprop.h"
#include "rtl-iter.h"

/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
			HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
	continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg), REGNO (dreg));
    }
  if (hard_reg_set_intersect_p (hardregs, prologue_used))
    return true;
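
  /* Any remaining def of a call-saved register that is live somewhere in
     the function means the prologue must save and restore that register,
     so INSN needs the stack frame too.  */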
  AND_COMPL_HARD_REG_SET (hardregs, call_used_reg_set);
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (hardregs, regno)
	&& df_regs_ever_live_p (regno))
      return true;

  FOR_EACH_INSN_USE (use, insn)
    {
      rtx reg = DF_REF_REG (use);

      if (!REG_P (reg))
	continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (reg),
			   REGNO (reg));
    }
  if (hard_reg_set_intersect_p (hardregs, set_up_by_prologue))
    return true;

  return false;
}

/* See whether there is a single live edge out of BB whose destination
   uses [REGNO, END_REGNO).  Return that edge if its destination block
   has one or two predecessors.  Otherwise return NULL.  */

static edge
live_edge_for_reg (basic_block bb, int regno, int end_regno)
{
  edge e, live_edge;
  edge_iterator ei;
  bitmap live;
  int i;

  live_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      live = df_get_live_in (e->dest);
      for (i = regno; i < end_regno; i++)
	if (REGNO_REG_SET_P (live, i))
	  {
	    if (live_edge && live_edge != e)
	      return NULL;
	    live_edge = e;
	  }
    }

  /* We can sometimes encounter dead code.  Don't try to move it
     into the exit block.  */
  if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Reject targets of abnormal edges.  This is needed for correctness
     on ports like Alpha and MIPS, whose pic_offset_table_rtx can die on
     exception edges even though it is generally treated as call-saved
     for the majority of the compilation.  Moving across abnormal edges
     isn't going to be interesting for shrink-wrap usage anyway.  */
  if (live_edge->flags & EDGE_ABNORMAL)
    return NULL;

  /* When live_edge->dest->preds == 2, we can create a new block on
     the edge to make it meet the requirement.  */
  if (EDGE_COUNT (live_edge->dest->preds) > 2)
    return NULL;

  return live_edge;
}

/* Try to move INSN from BB to a successor.  Return true on success.
   USES and DEFS are the set of registers that are used and defined
   after INSN in BB.  SPLIT_P indicates whether a live edge from BB
   is split or not.  */

static bool
move_insn_for_shrink_wrap (basic_block bb, rtx_insn *insn,
			   const HARD_REG_SET uses,
			   const HARD_REG_SET defs,
			   bool *split_p)
{
  rtx set, src, dest;
  bitmap live_out, live_in, bb_uses, bb_defs;
  unsigned int i, dregno, end_dregno;
  unsigned int sregno = FIRST_PSEUDO_REGISTER;
  unsigned int end_sregno = FIRST_PSEUDO_REGISTER;
  basic_block next_block;
  edge live_edge;

  /* Look for a simple register assignment.  We don't use single_set here
     because we can't deal with any CLOBBERs, USEs, or REG_UNUSED secondary
     destinations.  */
  if (!INSN_P (insn))
    return false;
  set = PATTERN (insn);
  if (GET_CODE (set) != SET)
    return false;
  src = SET_SRC (set);
  dest = SET_DEST (set);

  /* For the destination, we want only a register.  Also disallow STACK
     or FRAME related adjustments.  They are likely part of the prologue,
     so keep them in the entry block.  */
  if (!REG_P (dest)
      || dest == stack_pointer_rtx
      || dest == frame_pointer_rtx
      || dest == hard_frame_pointer_rtx)
    return false;

  /* For the source, we want one of:
      (1) A (non-overlapping) register
      (2) A constant,
      (3) An expression involving no more than one register.

     That last point comes from the code following, which was originally
     written to handle only register move operations, and still only handles
     a single source register when checking for overlaps.  Happily, the
     same checks can be applied to expressions like (plus reg const).  */

  if (CONSTANT_P (src))
    ;
  else if (!REG_P (src))
    {
      rtx src_inner = NULL_RTX;

      if (can_throw_internal (insn))
	return false;

      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, src, ALL)
	{
	  rtx x = *iter;
	  switch (GET_RTX_CLASS (GET_CODE (x)))
	    {
	    case RTX_CONST_OBJ:
	    case RTX_COMPARE:
	    case RTX_COMM_COMPARE:
	    case RTX_BIN_ARITH:
	    case RTX_COMM_ARITH:
	    case RTX_UNARY:
	    case RTX_TERNARY:
	      /* Constant or expression.  Continue.  */
	      break;

	    case RTX_OBJ:
	    case RTX_EXTRA:
	      switch (GET_CODE (x))
		{
		case UNSPEC:
		case SUBREG:
		case STRICT_LOW_PART:
		case PC:
		case LO_SUM:
		  /* Ok.  Continue.  */
		  break;

		case REG:
		  /* Fail if we see a second inner register.  */
		  if (src_inner != NULL)
		    return false;
		  src_inner = x;
		  break;

		default:
		  return false;
		}
	      break;

	    default:
	      return false;
	    }
	}

      if (src_inner != NULL)
	src = src_inner;
    }
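
  /* At this point SRC is either a constant or a single register: if the
     source was a more complex expression, SRC now refers to the one
     register found inside it, so the overlap checks below can treat the
     expression like a plain register move.  */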

  /* Make sure that the source register isn't defined later in BB.  */
  if (REG_P (src))
    {
      sregno = REGNO (src);
      end_sregno = END_REGNO (src);
      if (overlaps_hard_reg_set_p (defs, GET_MODE (src), sregno))
	return false;
    }

  /* Make sure that the destination register isn't referenced later in BB.  */
  dregno = REGNO (dest);
  end_dregno = END_REGNO (dest);
  if (overlaps_hard_reg_set_p (uses, GET_MODE (dest), dregno)
      || overlaps_hard_reg_set_p (defs, GET_MODE (dest), dregno))
    return false;

  /* See whether there is a successor block to which we could move INSN.  */
  live_edge = live_edge_for_reg (bb, dregno, end_dregno);
  if (!live_edge)
    return false;

  next_block = live_edge->dest;
  /* Create a new basic block on the edge.  */
  if (EDGE_COUNT (next_block->preds) == 2)
    {
      /* split_edge for a block with only one successor is meaningless.  */
      if (EDGE_COUNT (bb->succs) == 1)
	return false;

      /* If DF_LIVE doesn't exist, i.e. at -O1, just give up.  */
      if (!df_live)
	return false;

      basic_block old_dest = live_edge->dest;
      next_block = split_edge (live_edge);

      /* We create a new basic block.  Call df_grow_bb_info to make sure
	 all data structures are allocated.  */
      df_grow_bb_info (df_live);

      bitmap_and (df_get_live_in (next_block), df_get_live_out (bb),
		  df_get_live_in (old_dest));
      df_set_bb_dirty (next_block);

      /* We should not split more than once for a function.  */
      if (*split_p)
	return false;

      *split_p = true;
    }

  /* At this point we are committed to moving INSN, but let's try to
     move it as far as we can.  */
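  /* Keep following the unique edge on which DEST is live, stopping at the
     first block that uses or clobbers DEST, clobbers SRC, or has no single
     suitable successor.  (If we split an edge above, INSN simply stays in
     the newly created block.)  */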
  do
    {
      live_out = df_get_live_out (bb);
      live_in = df_get_live_in (next_block);
      bb = next_block;

      /* Check whether BB uses DEST or clobbers DEST.  We need to add
	 INSN to BB if so.  Either way, DEST is no longer live on entry,
	 except for any part that overlaps SRC (next loop).  */
      bb_uses = &DF_LR_BB_INFO (bb)->use;
      bb_defs = &DF_LR_BB_INFO (bb)->def;
      if (df_live)
	{
	  for (i = dregno; i < end_dregno; i++)
	    {
	      if (*split_p
		  || REGNO_REG_SET_P (bb_uses, i)
		  || REGNO_REG_SET_P (bb_defs, i)
		  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
		next_block = NULL;
	      CLEAR_REGNO_REG_SET (live_out, i);
	      CLEAR_REGNO_REG_SET (live_in, i);
	    }

	  /* Check whether BB clobbers SRC.  We need to add INSN to BB if so.
	     Either way, SRC is now live on entry.  */
	  for (i = sregno; i < end_sregno; i++)
	    {
	      if (*split_p
		  || REGNO_REG_SET_P (bb_defs, i)
		  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
		next_block = NULL;
	      SET_REGNO_REG_SET (live_out, i);
	      SET_REGNO_REG_SET (live_in, i);
	    }
	}
      else
	{
	  /* DF_LR_BB_INFO (bb)->def does not comprise the DF_REF_PARTIAL and
	     DF_REF_CONDITIONAL defs.  So if DF_LIVE doesn't exist, i.e.
	     at -O1, just give up searching NEXT_BLOCK.  */
	  next_block = NULL;
	  for (i = dregno; i < end_dregno; i++)
	    {
	      CLEAR_REGNO_REG_SET (live_out, i);
	      CLEAR_REGNO_REG_SET (live_in, i);
	    }

	  for (i = sregno; i < end_sregno; i++)
	    {
	      SET_REGNO_REG_SET (live_out, i);
	      SET_REGNO_REG_SET (live_in, i);
	    }
	}

      /* If we don't need to add the move to BB, look for a single
	 successor block.  */
      if (next_block)
	{
	  live_edge = live_edge_for_reg (next_block, dregno, end_dregno);
	  if (!live_edge || EDGE_COUNT (live_edge->dest->preds) > 1)
	    break;
	  next_block = live_edge->dest;
	}
    }
  while (next_block);

  /* For the newly created basic block, there is no dataflow info at all.
     So skip the following dataflow update and check.  */
  if (!(*split_p))
    {
      /* BB now defines DEST.  It only uses the parts of DEST that overlap SRC
	 (next loop).  */
      for (i = dregno; i < end_dregno; i++)
	{
	  CLEAR_REGNO_REG_SET (bb_uses, i);
	  SET_REGNO_REG_SET (bb_defs, i);
	}

      /* BB now uses SRC.  */
      for (i = sregno; i < end_sregno; i++)
	SET_REGNO_REG_SET (bb_uses, i);
    }

  emit_insn_after (PATTERN (insn), bb_note (bb));
  delete_insn (insn);
  return true;
}

/* Look for register copies in the first block of the function, and move
   them down into successor blocks if the register is used only on one
   path.  This exposes more opportunities for shrink-wrapping.  These
   kinds of sets often occur when incoming argument registers are moved
   to call-saved registers because their values are live across one or
   more calls during the function.  */

static void
prepare_shrink_wrap (basic_block entry_block)
{
  rtx_insn *insn, *curr;
  rtx x;
  HARD_REG_SET uses, defs;
  df_ref def, use;
  bool split_p = false;

  if (JUMP_P (BB_END (entry_block)))
    {
      /* To have more shrink-wrapping opportunities, prepare_shrink_wrap tries
	 to sink copies from parameter registers to callee-saved registers out
	 of the entry block.  Call
	 copyprop_hardreg_forward_bb_without_debug_insn to remove some of the
	 dependences that would otherwise prevent that.  */
      copyprop_hardreg_forward_bb_without_debug_insn (entry_block);
    }

  CLEAR_HARD_REG_SET (uses);
  CLEAR_HARD_REG_SET (defs);
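  /* Walk the entry block backwards so that USES and DEFS always describe
     the registers used and defined after INSN in the block, which is what
     move_insn_for_shrink_wrap expects.  */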
  FOR_BB_INSNS_REVERSE_SAFE (entry_block, insn, curr)
    if (NONDEBUG_INSN_P (insn)
	&& !move_insn_for_shrink_wrap (entry_block, insn, uses, defs,
				       &split_p))
      {
	/* Add all defined registers to DEFS.  */
	FOR_EACH_INSN_DEF (def, insn)
	  {
	    x = DF_REF_REG (def);
	    if (REG_P (x) && HARD_REGISTER_P (x))
	      SET_HARD_REG_BIT (defs, REGNO (x));
	  }

	/* Add all used registers to USES.  */
	FOR_EACH_INSN_USE (use, insn)
	  {
	    x = DF_REF_REG (use);
	    if (REG_P (x) && HARD_REGISTER_P (x))
	      SET_HARD_REG_BIT (uses, REGNO (x));
	  }
      }
}

/* Return whether basic block PRO can get the prologue.  It cannot if it
   has incoming complex edges that need a prologue inserted (we make a new
   block for the prologue, so those edges would need to be redirected, which
   does not work).  It also cannot if there exist registers live on entry
   to PRO that are clobbered by the prologue.  */

static bool
can_get_prologue (basic_block pro, HARD_REG_SET prologue_clobbered)
{
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, pro->preds)
    if (e->flags & (EDGE_COMPLEX | EDGE_CROSSING)
	&& !dominated_by_p (CDI_DOMINATORS, e->src, pro))
      return false;

  HARD_REG_SET live;
  REG_SET_TO_HARD_REG_SET (live, df_get_live_in (pro));
  if (hard_reg_set_intersect_p (live, prologue_clobbered))
    return false;

  return true;
}

/* Return whether we can duplicate basic block BB for shrink wrapping.  We
   cannot if the block cannot be duplicated at all, or if any of its incoming
   edges are complex and come from a block that does not require a prologue
   (we cannot redirect such edges), or if the block is too big to copy.
   PRO is the basic block before which we would put the prologue, MAX_SIZE is
   the maximum size block we allow to be copied.  */

static bool
can_dup_for_shrink_wrapping (basic_block bb, basic_block pro, unsigned max_size)
{
  if (!can_duplicate_block_p (bb))
    return false;

  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & (EDGE_COMPLEX | EDGE_CROSSING)
	&& !dominated_by_p (CDI_DOMINATORS, e->src, pro))
      return false;

  unsigned size = 0;

  rtx_insn *insn;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
	size += get_attr_min_length (insn);
	if (size > max_size)
	  return false;
      }

  return true;
}

/* If the source of edge E has more than one successor, the verifier for
   branch probabilities gets confused by the fake edges we make where
   simple_return statements will be inserted later (because those are not
   marked as fallthrough edges).  Fix this by creating an extra block just
   for that fallthrough.  */

static edge
fix_fake_fallthrough_edge (edge e)
{
  if (EDGE_COUNT (e->src->succs) <= 1)
    return e;

  basic_block old_bb = e->src;
  rtx_insn *end = BB_END (old_bb);
  rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
  basic_block new_bb = create_basic_block (note, note, old_bb);
  BB_COPY_PARTITION (new_bb, old_bb);
  BB_END (old_bb) = end;

  redirect_edge_succ (e, new_bb);
  e->flags |= EDGE_FALLTHRU;
  e->flags &= ~EDGE_FAKE;

  return make_edge (new_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}

/* Try to perform a kind of shrink-wrapping, making sure the
   prologue/epilogue is emitted only around those parts of the
   function that require it.

   There will be exactly one prologue, and it will be executed either
   zero or one time, on any path.  Depending on where the prologue is
   placed, some of the basic blocks can be reached via both paths with
   and without a prologue.  Such blocks will be duplicated here, and the
   edges changed to match.

   Paths that go to the exit without going through the prologue will use
   a simple_return instead of the epilogue.  We maximize the number of
   those, making sure to only duplicate blocks that can be duplicated.
   If the prologue can then still be placed in multiple locations, we
   place it as early as possible.

   An example, where we duplicate blocks with control flow (legend:
   _B_egin, _R_eturn and _S_imple_return; edges without arrowhead should
   be taken to point down or to the right, to simplify the diagram; here,
   block 3 needs a prologue, the rest does not):

       B                B
       |                |
       2                2
       |\               |\
       | 3    becomes   | 3
       |/               |  \
       4                7   4
       |\               |\  |\
       | 5              | 8 | 5
       |/               |/  |/
       6                9   6
       |                |   |
       R                S   R

   (bb 4 is duplicated to 7, and so on; the prologue is inserted on the
   edge 2->3).

   Another example, where part of a loop is duplicated (again, bb 3 is
   the only block that needs a prologue):

       B   3<--             B       ->3<--
       |   |   |            |      |  |   |
       |   v   |  becomes   |      |  v   |
       2---4---             2---5--   4---
           |                    |     |
           R                    S     R

   (bb 4 is duplicated to 5; the prologue is inserted on the edge 5->3).

   ENTRY_EDGE is the edge where the prologue will be placed, possibly
   changed by this function.  BB_WITH is a bitmap that, if we do shrink-
   wrap, will on return contain the interesting blocks that run with
   prologue.  PROLOGUE_SEQ is the prologue we will insert.  */

void
try_shrink_wrapping (edge *entry_edge, bitmap_head *bb_with,
		     rtx_insn *prologue_seq)
{
  /* If we cannot shrink-wrap, are told not to shrink-wrap, or it makes
     no sense to shrink-wrap: then do not shrink-wrap!  */

  if (!SHRINK_WRAPPING_ENABLED)
    return;

  if (crtl->profile && !targetm.profile_before_prologue ())
    return;

  if (crtl->calls_eh_return)
    return;

  bool empty_prologue = true;
  for (rtx_insn *insn = prologue_seq; insn; insn = NEXT_INSN (insn))
    if (!(NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END))
      {
	empty_prologue = false;
	break;
      }
  if (empty_prologue)
    return;

  /* Move some code down to expose more shrink-wrapping opportunities.  */

  basic_block entry = (*entry_edge)->dest;
  prepare_shrink_wrap (entry);

  if (dump_file)
    fprintf (dump_file, "Attempting shrink-wrapping optimization.\n");

  /* Compute the registers set and used in the prologue.  */

  HARD_REG_SET prologue_clobbered, prologue_used;
  CLEAR_HARD_REG_SET (prologue_clobbered);
  CLEAR_HARD_REG_SET (prologue_used);
  for (rtx_insn *insn = prologue_seq; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
	HARD_REG_SET this_used;
	CLEAR_HARD_REG_SET (this_used);
	note_uses (&PATTERN (insn), record_hard_reg_uses, &this_used);
	AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered);
	IOR_HARD_REG_SET (prologue_used, this_used);
	note_stores (PATTERN (insn), record_hard_reg_sets, &prologue_clobbered);
      }
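  /* The stack pointer (and the hard frame pointer, if one is needed) are
     set up by the prologue itself, so writes to them here should not count
     as clobbers when deciding where the prologue can go.  */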
  CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM);
  if (frame_pointer_needed)
    CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM);

  /* Find out what registers are set up by the prologue; any use of these
     cannot happen before the prologue.  */

  struct hard_reg_set_container set_up_by_prologue;
  CLEAR_HARD_REG_SET (set_up_by_prologue.set);
  add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, STACK_POINTER_REGNUM);
  add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM);
  if (frame_pointer_needed)
    add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
			 HARD_FRAME_POINTER_REGNUM);
  if (pic_offset_table_rtx
      && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
			 PIC_OFFSET_TABLE_REGNUM);
  if (crtl->drap_reg)
    add_to_hard_reg_set (&set_up_by_prologue.set,
			 GET_MODE (crtl->drap_reg),
			 REGNO (crtl->drap_reg));
  if (targetm.set_up_by_prologue)
    targetm.set_up_by_prologue (&set_up_by_prologue);

  /* We will insert the prologue before the basic block PRO.  PRO should
     dominate all basic blocks that need the prologue to be executed
     before them.  First, make PRO the "tightest wrap" possible.  */

  calculate_dominance_info (CDI_DOMINATORS);

  basic_block pro = 0;
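  /* PRO == 0 means no block is known to need the prologue yet; the first
     block found below becomes the initial candidate, since
     nearest_common_dominator returns the other argument when one of its
     arguments is NULL.  */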

  basic_block bb;
  edge e;
  edge_iterator ei;
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn)
	    && requires_stack_frame_p (insn, prologue_used,
				       set_up_by_prologue.set))
	  {
	    if (dump_file)
	      fprintf (dump_file, "Block %d needs the prologue.\n", bb->index);
	    pro = nearest_common_dominator (CDI_DOMINATORS, pro, bb);
	    break;
	  }
    }

  /* If nothing needs a prologue, just put it at the start.  This really
     shouldn't happen, but we cannot fix it here.  */

  if (pro == 0)
    {
      if (dump_file)
	fprintf (dump_file, "Nothing needs a prologue, but it isn't empty; "
		 "putting it at the start.\n");
      pro = entry;
    }

  if (dump_file)
    fprintf (dump_file, "After wrapping required blocks, PRO is now %d\n",
	     pro->index);

  /* Now see if we can put the prologue at the start of PRO.  Putting it
     there might require duplicating a block that cannot be duplicated,
     or in some cases we cannot insert the prologue there at all.  If PRO
     won't do, try again with the immediate dominator of PRO, and so on.

     The blocks that need duplicating are those reachable from PRO but
     not dominated by it.  We keep in BB_WITH a bitmap of the blocks
     reachable from PRO that we already found, and in VEC a stack of
     those we still need to consider (to find successors).  */

  bitmap_set_bit (bb_with, pro->index);

  vec<basic_block> vec;
  vec.create (n_basic_blocks_for_fn (cfun));
  vec.quick_push (pro);
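
  /* The maximum size of a block we are willing to duplicate, measured in
     the same insn-length units that can_dup_for_shrink_wrapping uses.  */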
  unsigned max_grow_size = get_uncond_jump_length ();
  max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);

  while (!vec.is_empty () && pro != entry)
    {
      while (pro != entry && !can_get_prologue (pro, prologue_clobbered))
	{
	  pro = get_immediate_dominator (CDI_DOMINATORS, pro);

	  bitmap_set_bit (bb_with, pro->index);
	  vec.quick_push (pro);
	}

      basic_block bb = vec.pop ();
      if (!can_dup_for_shrink_wrapping (bb, pro, max_grow_size))
	while (!dominated_by_p (CDI_DOMINATORS, bb, pro))
	  {
	    gcc_assert (pro != entry);

	    pro = get_immediate_dominator (CDI_DOMINATORS, pro);

	    bitmap_set_bit (bb_with, pro->index);
	    vec.quick_push (pro);
	  }

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	    && bitmap_set_bit (bb_with, e->dest->index))
	  vec.quick_push (e->dest);
    }

  vec.release ();

  if (dump_file)
    fprintf (dump_file, "Avoiding non-duplicatable blocks, PRO is now %d\n",
	     pro->index);

  /* If we can move PRO back without having to duplicate more blocks, do so.
     We can move back to a block PRE if every path from PRE will eventually
     need a prologue, that is, PRO is a post-dominator of PRE.  */

  if (pro != entry)
    {
      calculate_dominance_info (CDI_POST_DOMINATORS);

      basic_block last_ok = pro;
      while (pro != entry)
	{
	  basic_block pre = get_immediate_dominator (CDI_DOMINATORS, pro);
	  if (!dominated_by_p (CDI_POST_DOMINATORS, pre, pro))
	    break;

	  pro = pre;
	  if (can_get_prologue (pro, prologue_clobbered))
	    last_ok = pro;
	}
      pro = last_ok;

      free_dominance_info (CDI_POST_DOMINATORS);
    }

  if (dump_file)
    fprintf (dump_file, "Bumping back to anticipatable blocks, PRO is now %d\n",
	     pro->index);

  if (pro == entry)
    {
      free_dominance_info (CDI_DOMINATORS);
      return;
    }

  /* Compute what fraction of the frequency and count of the blocks that run
     both with and without prologue are for running with prologue.  This gives
     the correct answer for reducible flow graphs; for irreducible flow graphs
     our profile is messed up beyond repair anyway.  */

  gcov_type num = 0;
  gcov_type den = 0;

  FOR_EACH_EDGE (e, ei, pro->preds)
    if (!dominated_by_p (CDI_DOMINATORS, e->src, pro))
      {
	num += EDGE_FREQUENCY (e);
	den += e->src->frequency;
      }

  if (den == 0)
    den = 1;

  /* All is okay, so do it.  */

  crtl->shrink_wrapped = true;
  if (dump_file)
    fprintf (dump_file, "Performing shrink-wrapping.\n");

  /* Copy the blocks that can run both with and without prologue.  The
     originals run with prologue, the copies without.  Store a pointer to
     the copy in the ->aux field of the original.  */

  FOR_EACH_BB_FN (bb, cfun)
    if (bitmap_bit_p (bb_with, bb->index)
	&& !dominated_by_p (CDI_DOMINATORS, bb, pro))
      {
	basic_block dup = duplicate_block (bb, 0, 0);

	bb->aux = dup;

	if (JUMP_P (BB_END (dup)) && !any_condjump_p (BB_END (dup)))
	  emit_barrier_after_bb (dup);

	if (EDGE_COUNT (dup->succs) == 0)
	  emit_barrier_after_bb (dup);

	if (dump_file)
	  fprintf (dump_file, "Duplicated %d to %d\n", bb->index, dup->index);

	bb->frequency = RDIV (num * bb->frequency, den);
	dup->frequency -= bb->frequency;
	bb->count = RDIV (num * bb->count, den);
	dup->count -= bb->count;
      }

  /* Now change the edges to point to the copies, where appropriate.  */

  FOR_EACH_BB_FN (bb, cfun)
    if (!dominated_by_p (CDI_DOMINATORS, bb, pro))
      {
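	/* If BB was duplicated, the copy (stored in BB->aux) is the version
	   that runs without the prologue, so it is the copy's outgoing
	   edges that need redirecting here.  */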
	basic_block src = bb;
	if (bitmap_bit_p (bb_with, bb->index))
	  src = (basic_block) bb->aux;

	FOR_EACH_EDGE (e, ei, src->succs)
	  {
	    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	      continue;

	    if (bitmap_bit_p (bb_with, e->dest->index)
		&& !dominated_by_p (CDI_DOMINATORS, e->dest, pro))
	      {
		if (dump_file)
		  fprintf (dump_file, "Redirecting edge %d->%d to %d\n",
			   e->src->index, e->dest->index,
			   ((basic_block) e->dest->aux)->index);
		redirect_edge_and_branch_force (e, (basic_block) e->dest->aux);
	      }
	    else if (e->flags & EDGE_FALLTHRU
		     && bitmap_bit_p (bb_with, bb->index))
	      force_nonfallthru (e);
	  }
      }

  /* Also redirect the function entry edge if necessary.  */

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    if (bitmap_bit_p (bb_with, e->dest->index)
	&& !dominated_by_p (CDI_DOMINATORS, e->dest, pro))
      {
	basic_block split_bb = split_edge (e);
	e = single_succ_edge (split_bb);
	redirect_edge_and_branch_force (e, (basic_block) e->dest->aux);
      }

  /* Change all the exits that should get a simple_return to FAKE.
     They will be converted later.  */

  FOR_EACH_BB_FN (bb, cfun)
    if (!bitmap_bit_p (bb_with, bb->index))
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	  {
	    e = fix_fake_fallthrough_edge (e);

	    e->flags &= ~EDGE_FALLTHRU;
	    if (!(e->flags & EDGE_SIBCALL))
	      e->flags |= EDGE_FAKE;

	    emit_barrier_after_bb (e->src);
	  }

  /* Finally, we want a single edge to put the prologue on.  Make a new
     block before the PRO block; the edge between them is the edge we want.
     Then redirect those edges into PRO that come from blocks without the
     prologue, to point to the new block instead.  The new prologue block
     is put at the end of the insn chain.  */

  basic_block new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
  BB_COPY_PARTITION (new_bb, pro);
  if (dump_file)
    fprintf (dump_file, "Made prologue block %d\n", new_bb->index);

  for (ei = ei_start (pro->preds); (e = ei_safe_edge (ei)); )
    {
      if (bitmap_bit_p (bb_with, e->src->index)
	  || dominated_by_p (CDI_DOMINATORS, e->src, pro))
	{
	  ei_next (&ei);
	  continue;
	}

      new_bb->count += RDIV (e->src->count * e->probability, REG_BR_PROB_BASE);
      new_bb->frequency += EDGE_FREQUENCY (e);

      redirect_edge_and_branch_force (e, new_bb);
      if (dump_file)
	fprintf (dump_file, "Redirected edge from %d\n", e->src->index);
    }

  *entry_edge = make_single_succ_edge (new_bb, pro, EDGE_FALLTHRU);
  force_nonfallthru (*entry_edge);

  free_dominance_info (CDI_DOMINATORS);
}

/* If we're allowed to generate a simple return instruction, then by
   definition we don't need a full epilogue.  If the last basic
   block before the exit block does not contain active instructions,
   examine its predecessors and try to emit (conditional) return
   instructions.  */

edge
get_unconverted_simple_return (edge exit_fallthru_edge, bitmap_head bb_flags,
			       vec<edge> *unconverted_simple_returns,
			       rtx_insn **returnjump)
{
  if (optimize)
    {
      unsigned i, last;

      /* convert_jumps_to_returns may add to preds of the exit block
	 (but won't remove).  Stop at end of current preds.  */
      last = EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
      for (i = 0; i < last; i++)
	{
	  edge e = EDGE_I (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, i);
	  if (LABEL_P (BB_HEAD (e->src))
	      && !bitmap_bit_p (&bb_flags, e->src->index)
	      && !active_insn_between (BB_HEAD (e->src), BB_END (e->src)))
	    *unconverted_simple_returns
	      = convert_jumps_to_returns (e->src, true,
					  *unconverted_simple_returns);
	}
    }
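
  /* If the block that falls through to the exit does not run with the
     prologue, give it an explicit simple_return; RETURNJUMP records that
     jump so convert_to_simple_return can reuse its block later.  */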
  if (exit_fallthru_edge != NULL
      && EDGE_COUNT (exit_fallthru_edge->src->preds) != 0
      && !bitmap_bit_p (&bb_flags, exit_fallthru_edge->src->index))
    {
      basic_block last_bb;

      last_bb = emit_return_for_exit (exit_fallthru_edge, true);
      *returnjump = BB_END (last_bb);
      exit_fallthru_edge = NULL;
    }
  return exit_fallthru_edge;
}

/* If there were branches to an empty LAST_BB which we tried to
   convert to conditional simple_returns, but couldn't for some
   reason, create a block to hold a simple_return insn and redirect
   those remaining edges.  */

void
convert_to_simple_return (edge entry_edge, edge orig_entry_edge,
			  bitmap_head bb_flags, rtx_insn *returnjump,
			  vec<edge> unconverted_simple_returns)
{
  edge e;
  edge_iterator ei;

  if (!unconverted_simple_returns.is_empty ())
    {
      basic_block simple_return_block_hot = NULL;
      basic_block simple_return_block_cold = NULL;
      edge pending_edge_hot = NULL;
      edge pending_edge_cold = NULL;
      basic_block exit_pred;
      int i;
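
      /* Unconverted simple returns can only exist if shrink-wrapping
	 actually moved the prologue off the original entry edge.  */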
      gcc_assert (entry_edge != orig_entry_edge);

      /* See if we can reuse the last insn that was emitted for the
	 epilogue.  */
      if (returnjump != NULL_RTX
	  && JUMP_LABEL (returnjump) == simple_return_rtx)
	{
	  e = split_block (BLOCK_FOR_INSN (returnjump), PREV_INSN (returnjump));
	  if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
	    simple_return_block_hot = e->dest;
	  else
	    simple_return_block_cold = e->dest;
	}

      /* Also check returns we might need to add to tail blocks.  */
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
	if (EDGE_COUNT (e->src->preds) != 0
	    && (e->flags & EDGE_FAKE) != 0
	    && !bitmap_bit_p (&bb_flags, e->src->index))
	  {
	    if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
	      pending_edge_hot = e;
	    else
	      pending_edge_cold = e;
	  }

      /* Save a pointer to the exit's predecessor BB for use in
	 inserting new BBs at the end of the function.  Do this
	 after the call to split_block above which may split
	 the original exit pred.  */
      exit_pred = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;

      FOR_EACH_VEC_ELT (unconverted_simple_returns, i, e)
	{
	  basic_block *pdest_bb;
	  edge pending;

	  if (BB_PARTITION (e->src) == BB_HOT_PARTITION)
	    {
	      pdest_bb = &simple_return_block_hot;
	      pending = pending_edge_hot;
	    }
	  else
	    {
	      pdest_bb = &simple_return_block_cold;
	      pending = pending_edge_cold;
	    }

	  if (*pdest_bb == NULL && pending != NULL)
	    {
	      emit_return_into_block (true, pending->src);
	      pending->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);
	      *pdest_bb = pending->src;
	    }
	  else if (*pdest_bb == NULL)
	    {
	      basic_block bb;

	      bb = create_basic_block (NULL, NULL, exit_pred);
	      BB_COPY_PARTITION (bb, e->src);
	      rtx_insn *ret = targetm.gen_simple_return ();
	      rtx_jump_insn *start = emit_jump_insn_after (ret, BB_END (bb));
	      JUMP_LABEL (start) = simple_return_rtx;
	      emit_barrier_after (start);

	      *pdest_bb = bb;
	      make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
	    }
	  redirect_edge_and_branch_force (e, *pdest_bb);
	}
      unconverted_simple_returns.release ();
    }

  if (entry_edge != orig_entry_edge)
    {
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
	if (EDGE_COUNT (e->src->preds) != 0
	    && (e->flags & EDGE_FAKE) != 0
	    && !bitmap_bit_p (&bb_flags, e->src->index))
	  {
	    e = fix_fake_fallthrough_edge (e);

	    emit_return_into_block (true, e->src);
	    e->flags &= ~(EDGE_FALLTHRU | EDGE_FAKE);