/* Shrink-wrapping related optimizations.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This file handles shrink-wrapping related optimizations.  */
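/* A brief illustration (a source-level sketch; the pass itself works on
   the RTL CFG, not on C code): in a function such as

     int f (int *p)
     {
       if (p == 0)
         return -1;
       return g (p);
     }

   only the path that calls g needs a stack frame.  Shrink-wrapping moves
   the prologue from the start of the function down to just before that
   call, so the early-return path runs without any frame setup.  */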
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "tm_p.h"
#include "regs.h"
#include "emit-rtl.h"
#include "output.h"
#include "tree-pass.h"
#include "cfgrtl.h"
#include "params.h"
#include "bb-reorder.h"
#include "shrink-wrap.h"
#include "regcprop.h"
#include "rtl-iter.h"
#include "valtrack.h"

/* Return true if INSN requires the stack frame to be set up.
   PROLOGUE_USED contains the hard registers used in the function
   prologue.  SET_UP_BY_PROLOGUE is the set of registers we expect the
   prologue to set up for the function.  */
bool
requires_stack_frame_p (rtx_insn *insn, HARD_REG_SET prologue_used,
                        HARD_REG_SET set_up_by_prologue)
{
  df_ref def, use;
  HARD_REG_SET hardregs;
  unsigned regno;

  if (CALL_P (insn))
    return !SIBLING_CALL_P (insn);

  /* We need a frame to get the unique CFA expected by the unwinder.  */
  if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
    return true;

  CLEAR_HARD_REG_SET (hardregs);
  FOR_EACH_INSN_DEF (def, insn)
    {
      rtx dreg = DF_REF_REG (def);

      if (!REG_P (dreg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (dreg), REGNO (dreg));
    }
  if (hard_reg_set_intersect_p (hardregs, prologue_used))
    return true;
  AND_COMPL_HARD_REG_SET (hardregs, call_used_reg_set);
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (hardregs, regno)
        && df_regs_ever_live_p (regno))
      return true;
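
  /* Now add the registers used by INSN to HARDREGS, which still holds
     the call-saved registers defined above, and test them against the
     registers the prologue sets up: any overlap means INSN cannot be
     executed before the prologue runs.  */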
  FOR_EACH_INSN_USE (use, insn)
    {
      rtx reg = DF_REF_REG (use);

      if (!REG_P (reg))
        continue;

      add_to_hard_reg_set (&hardregs, GET_MODE (reg),
                           REGNO (reg));
    }
  if (hard_reg_set_intersect_p (hardregs, set_up_by_prologue))
    return true;

  return false;
}

/* See whether BB has a single live edge, that is, a single successor
   whose live-in set contains some register in [REGNO, END_REGNO).
   Return the live edge if its destination block has one or two
   predecessors.  Otherwise return NULL.  */

static edge
live_edge_for_reg (basic_block bb, int regno, int end_regno)
{
  edge e, live_edge;
  edge_iterator ei;
  bitmap live;
  int i;

  live_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      live = df_get_live_in (e->dest);
      for (i = regno; i < end_regno; i++)
        if (REGNO_REG_SET_P (live, i))
          {
            if (live_edge && live_edge != e)
              return NULL;
            live_edge = e;
          }
    }

  /* We can sometimes encounter dead code.  Don't try to move it
     into the exit block.  */
  if (!live_edge || live_edge->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Reject targets of abnormal edges.  This is needed for correctness
     on ports like Alpha and MIPS, whose pic_offset_table_rtx can die on
     exception edges even though it is generally treated as call-saved
     for the majority of the compilation.  Moving across abnormal edges
     isn't going to be interesting for shrink-wrap usage anyway.  */
  if (live_edge->flags & EDGE_ABNORMAL)
    return NULL;

  /* When live_edge->dest->preds == 2, we can create a new block on
     the edge to make it meet the requirement.  */
  if (EDGE_COUNT (live_edge->dest->preds) > 2)
    return NULL;

  return live_edge;
}

/* Try to move INSN from BB to a successor.  Return true on success.
   USES and DEFS are the set of registers that are used and defined
   after INSN in BB.  SPLIT_P indicates whether a live edge from BB
   has been split or not.  */

static bool
move_insn_for_shrink_wrap (basic_block bb, rtx_insn *insn,
                           const HARD_REG_SET uses,
                           const HARD_REG_SET defs,
                           bool *split_p,
                           struct dead_debug_local *debug)
{
  rtx set, src, dest;
  bitmap live_out, live_in, bb_uses, bb_defs;
  unsigned int i, dregno, end_dregno;
  unsigned int sregno = FIRST_PSEUDO_REGISTER;
  unsigned int end_sregno = FIRST_PSEUDO_REGISTER;
  basic_block next_block;
  edge live_edge;
  rtx_insn *dinsn;
  df_ref def;

  /* Look for a simple register assignment.  We don't use single_set here
     because we can't deal with any CLOBBERs, USEs, or REG_UNUSED secondary
     destinations.  */
  if (!INSN_P (insn))
    return false;
  set = PATTERN (insn);
  if (GET_CODE (set) != SET)
    return false;
  src = SET_SRC (set);
  dest = SET_DEST (set);

  /* For the destination, we want only a register.  Also disallow STACK
     or FRAME related adjustments.  They are likely part of the prologue,
     so keep them in the entry block.  */
  if (!REG_P (dest)
      || dest == stack_pointer_rtx
      || dest == frame_pointer_rtx
      || dest == hard_frame_pointer_rtx)
    return false;

  /* For the source, we want one of:
      (1) A (non-overlapping) register
      (2) A constant,
      (3) An expression involving no more than one register.

     That last point comes from the code following, which was originally
     written to handle only register move operations, and still only handles
     a single source register when checking for overlaps.  Happily, the
     same checks can be applied to expressions like (plus reg const).  */

  if (CONSTANT_P (src))
    ;
  else if (!REG_P (src))
    {
      rtx src_inner = NULL_RTX;

      if (can_throw_internal (insn))
        return false;

      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, src, ALL)
        {
          rtx x = *iter;
          switch (GET_RTX_CLASS (GET_CODE (x)))
            {
            case RTX_CONST_OBJ:
            case RTX_COMPARE:
            case RTX_COMM_COMPARE:
            case RTX_BIN_ARITH:
            case RTX_COMM_ARITH:
            case RTX_UNARY:
            case RTX_TERNARY:
              /* Constant or expression.  Continue.  */
              break;

            case RTX_OBJ:
            case RTX_EXTRA:
              switch (GET_CODE (x))
                {
                case UNSPEC:
                case SUBREG:
                case STRICT_LOW_PART:
                case PC:
                case LO_SUM:
                  /* Ok.  Continue.  */
                  break;

                case REG:
                  /* Fail if we see a second inner register.  */
                  if (src_inner != NULL)
                    return false;
                  src_inner = x;
                  break;

                default:
                  return false;
                }
              break;

            default:
              return false;
            }
        }

      if (src_inner != NULL)
        src = src_inner;
    }

  /* Make sure that the source register isn't defined later in BB.  */
  if (REG_P (src))
    {
      sregno = REGNO (src);
      end_sregno = END_REGNO (src);
      if (overlaps_hard_reg_set_p (defs, GET_MODE (src), sregno))
        return false;
    }

  /* Make sure that the destination register isn't referenced later in BB.  */
  dregno = REGNO (dest);
  end_dregno = END_REGNO (dest);
  if (overlaps_hard_reg_set_p (uses, GET_MODE (dest), dregno)
      || overlaps_hard_reg_set_p (defs, GET_MODE (dest), dregno))
    return false;

  /* See whether there is a successor block to which we could move INSN.  */
  live_edge = live_edge_for_reg (bb, dregno, end_dregno);
  if (!live_edge)
    return false;

  next_block = live_edge->dest;
  /* Create a new basic block on the edge.  */
  if (EDGE_COUNT (next_block->preds) == 2)
    {
      /* split_edge for a block with only one successor is meaningless.  */
      if (EDGE_COUNT (bb->succs) == 1)
        return false;

      /* If DF_LIVE doesn't exist, i.e. at -O1, just give up.  */
      if (!df_live)
        return false;

      basic_block old_dest = live_edge->dest;
      next_block = split_edge (live_edge);

      /* We create a new basic block.  Call df_grow_bb_info to make sure
         all data structures are allocated.  */
      df_grow_bb_info (df_live);

      bitmap_and (df_get_live_in (next_block), df_get_live_out (bb),
                  df_get_live_in (old_dest));
      df_set_bb_dirty (next_block);

      /* We should not split more than once for a function.  */
      if (*split_p)
        return false;

      *split_p = true;
    }

  /* At this point we are committed to moving INSN, but let's try to
     move it as far as we can.  */
  do
    {
      if (MAY_HAVE_DEBUG_INSNS)
        {
          FOR_BB_INSNS_REVERSE (bb, dinsn)
            if (DEBUG_INSN_P (dinsn))
              {
                df_ref use;
                FOR_EACH_INSN_USE (use, dinsn)
                  if (refers_to_regno_p (dregno, end_dregno,
                                         DF_REF_REG (use), (rtx *) NULL))
                    dead_debug_add (debug, use, DF_REF_REGNO (use));
              }
            else if (dinsn == insn)
              break;
        }
      live_out = df_get_live_out (bb);
      live_in = df_get_live_in (next_block);
      bb = next_block;

      /* Check whether BB uses DEST or clobbers DEST.  We need to add
         INSN to BB if so.  Either way, DEST is no longer live on entry,
         except for any part that overlaps SRC (next loop).  */
      bb_uses = &DF_LR_BB_INFO (bb)->use;
      bb_defs = &DF_LR_BB_INFO (bb)->def;
      if (df_live)
        {
          for (i = dregno; i < end_dregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_uses, i)
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          /* Check whether BB clobbers SRC.  We need to add INSN to BB if so.
             Either way, SRC is now live on entry.  */
          for (i = sregno; i < end_sregno; i++)
            {
              if (*split_p
                  || REGNO_REG_SET_P (bb_defs, i)
                  || REGNO_REG_SET_P (&DF_LIVE_BB_INFO (bb)->gen, i))
                next_block = NULL;
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }
      else
        {
          /* DF_LR_BB_INFO (bb)->def does not include the DF_REF_PARTIAL and
             DF_REF_CONDITIONAL defs.  So if DF_LIVE doesn't exist, i.e.
             at -O1, just give up searching NEXT_BLOCK.  */
          next_block = NULL;
          for (i = dregno; i < end_dregno; i++)
            {
              CLEAR_REGNO_REG_SET (live_out, i);
              CLEAR_REGNO_REG_SET (live_in, i);
            }

          for (i = sregno; i < end_sregno; i++)
            {
              SET_REGNO_REG_SET (live_out, i);
              SET_REGNO_REG_SET (live_in, i);
            }
        }

      /* If we don't need to add the move to BB, look for a single
         successor block.  */
      if (next_block)
        {
          live_edge = live_edge_for_reg (next_block, dregno, end_dregno);
          if (!live_edge || EDGE_COUNT (live_edge->dest->preds) > 1)
            break;
          next_block = live_edge->dest;
        }
    }
  while (next_block);
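
  /* The loop above ends either because INSN has to be added to BB itself
     (NEXT_BLOCK was cleared) or because there is no suitable single live
     successor left; either way, BB is now the block that receives INSN.  */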
  /* For the newly created basic block, there is no dataflow info at all.
     So skip the following dataflow update and check.  */
  if (!(*split_p))
    {
      /* BB now defines DEST.  It only uses the parts of DEST that overlap SRC
         (next loop).  */
      for (i = dregno; i < end_dregno; i++)
        {
          CLEAR_REGNO_REG_SET (bb_uses, i);
          SET_REGNO_REG_SET (bb_defs, i);
        }

      /* BB now uses SRC.  */
      for (i = sregno; i < end_sregno; i++)
        SET_REGNO_REG_SET (bb_uses, i);
    }

  /* Insert debug temps for dead REGs used in subsequent debug insns.  */
  if (debug->used && !bitmap_empty_p (debug->used))
    FOR_EACH_INSN_DEF (def, insn)
      dead_debug_insert_temp (debug, DF_REF_REGNO (def), insn,
                              DEBUG_TEMP_BEFORE_WITH_VALUE);

  emit_insn_after (PATTERN (insn), bb_note (bb));
  delete_insn (insn);
  return true;
}

/* Look for register copies in the first block of the function, and move
   them down into successor blocks if the register is used only on one
   path.  This exposes more opportunities for shrink-wrapping.  These
   kinds of sets often occur when incoming argument registers are moved
   to call-saved registers because their values are live across one or
   more calls during the function.  */
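
/* For instance (an illustrative sketch, not RTL from any particular
   target), an entry block of the form

       (set (reg:DI bx) (reg:DI di))    ;; copy argument to call-saved reg
       (jump_insn ... bb4 ... bb3 ...)  ;; conditional branch

   where BX is live only into bb4 can have the copy sunk into bb4, after
   which the entry block no longer touches the call-saved BX at all.  */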
static void
prepare_shrink_wrap (basic_block entry_block)
{
  rtx_insn *insn, *curr;
  rtx x;
  HARD_REG_SET uses, defs;
  df_ref def, use;
  bool split_p = false;
  unsigned int i;
  struct dead_debug_local debug;

  if (JUMP_P (BB_END (entry_block)))
    {
      /* To have more shrink-wrapping opportunities, prepare_shrink_wrap tries
         to sink copies from parameter registers to callee-saved registers out
         of the entry block.  copyprop_hardreg_forward_bb_without_debug_insn
         is called to break some dependences.  */
      copyprop_hardreg_forward_bb_without_debug_insn (entry_block);
    }

  dead_debug_local_init (&debug, NULL, NULL);
  CLEAR_HARD_REG_SET (uses);
  CLEAR_HARD_REG_SET (defs);

  FOR_BB_INSNS_REVERSE_SAFE (entry_block, insn, curr)
    if (NONDEBUG_INSN_P (insn)
        && !move_insn_for_shrink_wrap (entry_block, insn, uses, defs,
                                       &split_p, &debug))
      {
        /* Add all defined registers to DEFS.  */
        FOR_EACH_INSN_DEF (def, insn)
          {
            x = DF_REF_REG (def);
            if (REG_P (x) && HARD_REGISTER_P (x))
              for (i = REGNO (x); i < END_REGNO (x); i++)
                SET_HARD_REG_BIT (defs, i);
          }

        /* Add all used registers to USES.  */
        FOR_EACH_INSN_USE (use, insn)
          {
            x = DF_REF_REG (use);
            if (REG_P (x) && HARD_REGISTER_P (x))
              for (i = REGNO (x); i < END_REGNO (x); i++)
                SET_HARD_REG_BIT (uses, i);
          }
      }

  dead_debug_local_finish (&debug, NULL);
}

/* Return whether basic block PRO can get the prologue.  It cannot if it
   has incoming complex edges that need a prologue inserted (we make a new
   block for the prologue, so those edges would need to be redirected, which
   does not work).  It also cannot if there exist registers live on entry
   to PRO that are clobbered by the prologue.  */

static bool
can_get_prologue (basic_block pro, HARD_REG_SET prologue_clobbered)
{
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, pro->preds)
    if (e->flags & (EDGE_COMPLEX | EDGE_CROSSING)
        && !dominated_by_p (CDI_DOMINATORS, e->src, pro))
      return false;

  HARD_REG_SET live;
  REG_SET_TO_HARD_REG_SET (live, df_get_live_in (pro));
  if (hard_reg_set_intersect_p (live, prologue_clobbered))
    return false;

  return true;
}

/* Return whether we can duplicate basic block BB for shrink wrapping.  We
   cannot if the block cannot be duplicated at all, or if any of its incoming
   edges are complex and come from a block that does not require a prologue
   (we cannot redirect such edges), or if the block is too big to copy.
   PRO is the basic block before which we would put the prologue, MAX_SIZE is
   the maximum size block we allow to be copied.  */

static bool
can_dup_for_shrink_wrapping (basic_block bb, basic_block pro, unsigned max_size)
{
  if (!can_duplicate_block_p (bb))
    return false;

  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & (EDGE_COMPLEX | EDGE_CROSSING)
        && !dominated_by_p (CDI_DOMINATORS, e->src, pro))
      return false;

  unsigned size = 0;

  rtx_insn *insn;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
        size += get_attr_min_length (insn);
        if (size > max_size)
          return false;
      }

  return true;
}

/* Do whatever needs to be done for exits that run without prologue.
   Sibcalls need nothing done.  Normal exits get a simple_return inserted.  */

static void
handle_simple_exit (edge e)
{
  if (e->flags & EDGE_SIBCALL)
    {
      /* Tell function.c to take no further action on this edge.  */
      e->flags |= EDGE_IGNORE;

      e->flags &= ~EDGE_FALLTHRU;
      emit_barrier_after_bb (e->src);
      return;
    }

  /* If the basic block the edge comes from has multiple successors,
     split the edge.  */
  if (EDGE_COUNT (e->src->succs) > 1)
    {
      basic_block old_bb = e->src;
      rtx_insn *end = BB_END (old_bb);
      rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
      basic_block new_bb = create_basic_block (note, note, old_bb);
      BB_COPY_PARTITION (new_bb, old_bb);
      BB_END (old_bb) = end;

      redirect_edge_succ (e, new_bb);
      e->flags |= EDGE_FALLTHRU;

      e = make_edge (new_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
    }

  e->flags &= ~EDGE_FALLTHRU;
  rtx_jump_insn *ret = emit_jump_insn_after (targetm.gen_simple_return (),
                                             BB_END (e->src));
  JUMP_LABEL (ret) = simple_return_rtx;
  emit_barrier_after_bb (e->src);

  if (dump_file)
    fprintf (dump_file, "Made simple_return with UID %d in bb %d\n",
             INSN_UID (ret), e->src->index);
}

/* Try to perform a kind of shrink-wrapping, making sure the
   prologue/epilogue is emitted only around those parts of the
   function that require it.

   There will be exactly one prologue, and it will be executed either
   zero or one time, on any path.  Depending on where the prologue is
   placed, some of the basic blocks can be reached via both paths with
   and without a prologue.  Such blocks will be duplicated here, and the
   edges changed to match.

   Paths that go to the exit without going through the prologue will use
   a simple_return instead of the epilogue.  We maximize the number of
   those, making sure to only duplicate blocks that can be duplicated.
   If the prologue can then still be placed in multiple locations, we
   place it as early as possible.

   An example, where we duplicate blocks with control flow (legend:
   _B_egin, _R_eturn and _S_imple_return; edges without arrowhead should
   be taken to point down or to the right, to simplify the diagram; here,
   block 3 needs a prologue, the rest does not):


       B                 B
       |                 |
       2                 2
       |\                |\
       | 3    becomes    | 3
       |/                |  \
       4                 7   4
       |\                |\  |\
       | 5               | 8 | 5
       |/                |/  |/
       6                 9   6
       |                 |   |
       R                 S   R


   (bb 4 is duplicated to 7, and so on; the prologue is inserted on the
   edge 2->3).

   Another example, where part of a loop is duplicated (again, bb 3 is
   the only block that needs a prologue):


       B   3<--              B       ->3<--
       |   |   |             |      |  |   |
       |   v   |   becomes   |      |  v   |
       2---4---              2---5--   4---
           |                     |     |
           R                     S     R


   (bb 4 is duplicated to 5; the prologue is inserted on the edge 5->3).

   ENTRY_EDGE is the edge where the prologue will be placed, possibly
   changed by this function.  PROLOGUE_SEQ is the prologue we will insert.  */
void
try_shrink_wrapping (edge *entry_edge, rtx_insn *prologue_seq)
{
  /* If we cannot shrink-wrap, are told not to shrink-wrap, or it makes
     no sense to shrink-wrap: then do not shrink-wrap!  */

  if (!SHRINK_WRAPPING_ENABLED)
    return;

  if (crtl->profile && !targetm.profile_before_prologue ())
    return;

  if (crtl->calls_eh_return)
    return;

  bool empty_prologue = true;
  for (rtx_insn *insn = prologue_seq; insn; insn = NEXT_INSN (insn))
    if (!(NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_PROLOGUE_END))
      {
        empty_prologue = false;
        break;
      }
  if (empty_prologue)
    return;

  /* Move some code down to expose more shrink-wrapping opportunities.  */

  basic_block entry = (*entry_edge)->dest;
  prepare_shrink_wrap (entry);

  if (dump_file)
    fprintf (dump_file, "Attempting shrink-wrapping optimization.\n");

  /* Compute the registers set and used in the prologue.  */

  HARD_REG_SET prologue_clobbered, prologue_used;
  CLEAR_HARD_REG_SET (prologue_clobbered);
  CLEAR_HARD_REG_SET (prologue_used);
  for (rtx_insn *insn = prologue_seq; insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
        HARD_REG_SET this_used;
        CLEAR_HARD_REG_SET (this_used);
        note_uses (&PATTERN (insn), record_hard_reg_uses, &this_used);
        AND_COMPL_HARD_REG_SET (this_used, prologue_clobbered);
        IOR_HARD_REG_SET (prologue_used, this_used);
        note_stores (PATTERN (insn), record_hard_reg_sets,
                     &prologue_clobbered);
      }
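
  /* The prologue always adjusts the stack pointer, and sets up the frame
     pointer if one is needed, so such "clobbers" must not stop us from
     placing the prologue where those registers are live.  */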
  CLEAR_HARD_REG_BIT (prologue_clobbered, STACK_POINTER_REGNUM);
  if (frame_pointer_needed)
    CLEAR_HARD_REG_BIT (prologue_clobbered, HARD_FRAME_POINTER_REGNUM);
  /* Find out what registers are set up by the prologue; any use of these
     cannot happen before the prologue.  */

  struct hard_reg_set_container set_up_by_prologue;
  CLEAR_HARD_REG_SET (set_up_by_prologue.set);
  add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, STACK_POINTER_REGNUM);
  add_to_hard_reg_set (&set_up_by_prologue.set, Pmode, ARG_POINTER_REGNUM);
  if (frame_pointer_needed)
    add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                         HARD_FRAME_POINTER_REGNUM);
  if (pic_offset_table_rtx
      && (unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    add_to_hard_reg_set (&set_up_by_prologue.set, Pmode,
                         PIC_OFFSET_TABLE_REGNUM);
  if (crtl->drap_reg)
    add_to_hard_reg_set (&set_up_by_prologue.set,
                         GET_MODE (crtl->drap_reg),
                         REGNO (crtl->drap_reg));
  if (targetm.set_up_by_prologue)
    targetm.set_up_by_prologue (&set_up_by_prologue);

  /* We will insert the prologue before the basic block PRO.  PRO should
     dominate all basic blocks that need the prologue to be executed
     before them.  First, make PRO the "tightest wrap" possible.  */

  calculate_dominance_info (CDI_DOMINATORS);

  basic_block pro = 0;

  basic_block bb;
  edge e;
  edge_iterator ei;
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      FOR_BB_INSNS (bb, insn)
        if (NONDEBUG_INSN_P (insn)
            && requires_stack_frame_p (insn, prologue_used,
                                       set_up_by_prologue.set))
          {
            if (dump_file)
              fprintf (dump_file, "Block %d needs the prologue.\n", bb->index);
            pro = nearest_common_dominator (CDI_DOMINATORS, pro, bb);
            break;
          }
    }

  /* If nothing needs a prologue, just put it at the start.  This really
     shouldn't happen, but we cannot fix it here.  */

  if (pro == 0)
    {
      if (dump_file)
        fprintf (dump_file, "Nothing needs a prologue, but it isn't empty; "
                            "putting it at the start.\n");
      pro = entry;
    }

  if (dump_file)
    fprintf (dump_file, "After wrapping required blocks, PRO is now %d\n",
             pro->index);

  /* Now see if we can put the prologue at the start of PRO.  Putting it
     there might require duplicating a block that cannot be duplicated,
     or in some cases we cannot insert the prologue there at all.  If PRO
     won't do, try again with the immediate dominator of PRO, and so on.

     The blocks that need duplicating are those reachable from PRO but
     not dominated by it.  We keep in BB_WITH a bitmap of the blocks
     reachable from PRO that we already found, and in VEC a stack of
     those we still need to consider (to find successors).  */

  bitmap bb_with = BITMAP_ALLOC (NULL);
  bitmap_set_bit (bb_with, pro->index);

  vec<basic_block> vec;
  vec.create (n_basic_blocks_for_fn (cfun));
  vec.quick_push (pro);
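
  /* The size budget for duplicated blocks: the length of an unconditional
     jump, scaled by the max-grow-copy-bb-insns parameter.  */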
  unsigned max_grow_size = get_uncond_jump_length ();
  max_grow_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS);

  while (!vec.is_empty () && pro != entry)
    {
      while (pro != entry && !can_get_prologue (pro, prologue_clobbered))
        {
          pro = get_immediate_dominator (CDI_DOMINATORS, pro);

          if (bitmap_set_bit (bb_with, pro->index))
            vec.quick_push (pro);
        }

      basic_block bb = vec.pop ();
      if (!can_dup_for_shrink_wrapping (bb, pro, max_grow_size))
        while (!dominated_by_p (CDI_DOMINATORS, bb, pro))
          {
            gcc_assert (pro != entry);

            pro = get_immediate_dominator (CDI_DOMINATORS, pro);

            if (bitmap_set_bit (bb_with, pro->index))
              vec.quick_push (pro);
          }

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
            && bitmap_set_bit (bb_with, e->dest->index))
          vec.quick_push (e->dest);
    }

  if (dump_file)
    fprintf (dump_file, "Avoiding non-duplicatable blocks, PRO is now %d\n",
             pro->index);

  /* If we can move PRO back without having to duplicate more blocks, do so.
     We do this because putting the prologue earlier is better for scheduling.

     We can move back to a block PRE if every path from PRE will eventually
     need a prologue, that is, PRO is a post-dominator of PRE.  PRE needs
     to dominate every block reachable from itself.  We keep in BB_TMP a
     bitmap of the blocks reachable from PRE that we already found, and in
     VEC a stack of those we still need to consider.

     Any block reachable from PRE is also reachable from all predecessors
     of PRE, so if we find we need to move PRE back further we can leave
     everything not considered so far on the stack.  Any block dominated
     by PRE is also dominated by all other dominators of PRE, so anything
     found good for some PRE does not need to be reconsidered later.

     We don't need to update BB_WITH because none of the new blocks found
     can jump to a block that does not need the prologue.  */

  if (pro != entry)
    {
      calculate_dominance_info (CDI_POST_DOMINATORS);

      bitmap bb_tmp = BITMAP_ALLOC (NULL);
      bitmap_copy (bb_tmp, bb_with);
      basic_block last_ok = pro;
      vec.truncate (0);

      while (pro != entry)
        {
          basic_block pre = get_immediate_dominator (CDI_DOMINATORS, pro);
          if (!dominated_by_p (CDI_POST_DOMINATORS, pre, pro))
            break;

          if (bitmap_set_bit (bb_tmp, pre->index))
            vec.quick_push (pre);

          bool ok = true;
          while (!vec.is_empty ())
            {
              if (!dominated_by_p (CDI_DOMINATORS, vec.last (), pre))
                {
                  ok = false;
                  break;
                }

              basic_block bb = vec.pop ();
              FOR_EACH_EDGE (e, ei, bb->succs)
                if (bitmap_set_bit (bb_tmp, e->dest->index))
                  vec.quick_push (e->dest);
            }

          if (ok && can_get_prologue (pre, prologue_clobbered))
            last_ok = pre;

          pro = pre;
        }

      pro = last_ok;

      BITMAP_FREE (bb_tmp);
      free_dominance_info (CDI_POST_DOMINATORS);
    }

  vec.release ();

  if (dump_file)
    fprintf (dump_file, "Bumping back to anticipatable blocks, PRO is now %d\n",
             pro->index);

  if (pro == entry)
    {
      BITMAP_FREE (bb_with);
      free_dominance_info (CDI_DOMINATORS);
      return;
    }

  /* Compute what fraction of the frequency and count of the blocks that run
     both with and without prologue are for running with prologue.  This gives
     the correct answer for reducible flow graphs; for irreducible flow graphs
     our profile is messed up beyond repair anyway.  */

  gcov_type num = 0;
  gcov_type den = 0;

  FOR_EACH_EDGE (e, ei, pro->preds)
    if (!dominated_by_p (CDI_DOMINATORS, e->src, pro))
      {
        num += EDGE_FREQUENCY (e);
        den += e->src->frequency;
      }

  if (den == 0)
    den = 1;
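
  /* For example, if the edges entering PRO from outside the prologue
     region have a combined frequency of 30 while their source blocks have
     a total frequency of 100, then each duplicated block keeps 30/100 of
     its frequency and count (the original, which runs with prologue), and
     its copy (running without prologue) gets the remaining 70/100.  */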
  /* All is okay, so do it.  */

  crtl->shrink_wrapped = true;
  if (dump_file)
    fprintf (dump_file, "Performing shrink-wrapping.\n");

  /* Copy the blocks that can run both with and without prologue.  The
     originals run with prologue, the copies without.  Store a pointer to
     the copy in the ->aux field of the original.  */

  FOR_EACH_BB_FN (bb, cfun)
    if (bitmap_bit_p (bb_with, bb->index)
        && !dominated_by_p (CDI_DOMINATORS, bb, pro))
      {
        basic_block dup = duplicate_block (bb, 0, 0);

        bb->aux = dup;

        if (JUMP_P (BB_END (dup)) && !any_condjump_p (BB_END (dup)))
          emit_barrier_after_bb (dup);

        if (EDGE_COUNT (dup->succs) == 0)
          emit_barrier_after_bb (dup);

        if (dump_file)
          fprintf (dump_file, "Duplicated %d to %d\n", bb->index, dup->index);

        bb->frequency = RDIV (num * bb->frequency, den);
        dup->frequency -= bb->frequency;
        bb->count = RDIV (num * bb->count, den);
        dup->count -= bb->count;
      }

  /* Now change the edges to point to the copies, where appropriate.  */

  FOR_EACH_BB_FN (bb, cfun)
    if (!dominated_by_p (CDI_DOMINATORS, bb, pro))
      {
        basic_block src = bb;
        if (bitmap_bit_p (bb_with, bb->index))
          src = (basic_block) bb->aux;

        FOR_EACH_EDGE (e, ei, src->succs)
          {
            if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
              continue;

            if (bitmap_bit_p (bb_with, e->dest->index)
                && !dominated_by_p (CDI_DOMINATORS, e->dest, pro))
              {
                if (dump_file)
                  fprintf (dump_file, "Redirecting edge %d->%d to %d\n",
                           e->src->index, e->dest->index,
                           ((basic_block) e->dest->aux)->index);
                redirect_edge_and_branch_force (e, (basic_block) e->dest->aux);
              }
            else if (e->flags & EDGE_FALLTHRU
                     && bitmap_bit_p (bb_with, bb->index))
              force_nonfallthru (e);
          }
      }

  /* Also redirect the function entry edge if necessary.  */

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    if (bitmap_bit_p (bb_with, e->dest->index)
        && !dominated_by_p (CDI_DOMINATORS, e->dest, pro))
      {
        basic_block split_bb = split_edge (e);
        e = single_succ_edge (split_bb);
        redirect_edge_and_branch_force (e, (basic_block) e->dest->aux);
      }

  /* Make a simple_return for those exits that run without prologue.  */

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    if (!bitmap_bit_p (bb_with, bb->index))
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          handle_simple_exit (e);

  /* Finally, we want a single edge to put the prologue on.  Make a new
     block before the PRO block; the edge between them is the edge we want.
     Then redirect those edges into PRO that come from blocks without the
     prologue, to point to the new block instead.  The new prologue block
     is put at the end of the insn chain.  */

  basic_block new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
  BB_COPY_PARTITION (new_bb, pro);
  if (dump_file)
    fprintf (dump_file, "Made prologue block %d\n", new_bb->index);

  for (ei = ei_start (pro->preds); (e = ei_safe_edge (ei)); )
    {
      if (bitmap_bit_p (bb_with, e->src->index)
          || dominated_by_p (CDI_DOMINATORS, e->src, pro))
        {
          ei_next (&ei);
          continue;
        }

      new_bb->count += RDIV (e->src->count * e->probability, REG_BR_PROB_BASE);
      new_bb->frequency += EDGE_FREQUENCY (e);

      redirect_edge_and_branch_force (e, new_bb);
      if (dump_file)
        fprintf (dump_file, "Redirected edge from %d\n", e->src->index);
    }

  *entry_edge = make_single_succ_edge (new_bb, pro, EDGE_FALLTHRU);
  force_nonfallthru (*entry_edge);

  BITMAP_FREE (bb_with);
  free_dominance_info (CDI_DOMINATORS);
}