/* SSA Jump Threading
   Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Jeff Law  <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "timevar.h"
#include "dumpfile.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME.  */
vec<tree> ssa_name_values;

/* Set the value for the SSA name NAME to VALUE.  */

void
set_ssa_name_value (tree name, tree value)
{
  if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
    ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
  ssa_name_values[SSA_NAME_VERSION (name)] = value;
}

/* Initialize the per SSA_NAME value-handles array.  */
void
threadedge_initialize_values (void)
{
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);
}

/* Free the per SSA_NAME value-handle array.  */
void
threadedge_finalize_values (void)
{
  ssa_name_values.release ();
}

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
	  && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
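
/* For illustration (hypothetical GIMPLE, not taken from a real test
   case): a block reached from two different predecessors that ends in

       if (a_1 > 10) goto <bb 5>; else goto <bb 6>;

   is potentially threadable, because the value of a_1 may be known
   along one of the incoming edges.  A block with a single predecessor
   or a single successor offers nothing to specialize, so it is
   rejected by the checks above.  */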

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
	  && gimple_assign_single_p (use_stmt)
	  && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	  && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	  && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	{
	  return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}
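
/* For illustration, with hypothetical SSA names: if a pass such as VRP
   inserted

       x_7 = ASSERT_EXPR <x_2, x_2 > 0>;

   in a block dominating BB, then a use of x_2 in BB's final condition
   can be rewritten to use x_7, whose value is known to satisfy the
   asserted predicate along that path.  */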

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (vec<tree> *stack)
{
  while (stack->length () > 0)
    {
      tree prev_value, dest;

      dest = stack->pop ();

      /* A NULL value indicates we should stop unwinding, otherwise
	 pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
	break;

      prev_value = stack->pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, vec<tree> *stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  set_ssa_name_value (x, y);
  stack->reserve (2);
  stack->quick_push (prev_x);
  stack->quick_push (x);
}
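
/* For illustration, with a hypothetical SSA name: recording the
   temporary equivalence a_4 == 0 pushes the pair

       [ ..., previous-value-of-a_4, a_4 ]

   onto STACK (previous value first, name second), so that
   remove_temporary_equivalences can later pop the name and restore
   its previous value.  The unwind loop above stops either when STACK
   is empty or when it pops a NULL entry, which a caller may push as a
   marker to delimit entries it wants preserved.  */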

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, vec<tree> *stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we can not thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
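
/* For illustration, with hypothetical GIMPLE: when threading the edge
   from basic block 2 into a block containing

       x_3 = PHI <0(2), x_5(3)>

   the PHI collapses to the single argument for that edge, so the
   temporary equivalence x_3 == 0 is recorded.  Later statements and
   the block's final condition can then be simplified under that
   assumption.  */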

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      return fold (gimple_assign_rhs1 (stmt));

    case GIMPLE_UNARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }

    case GIMPLE_BINARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	tree op1 = gimple_assign_rhs2 (stmt);
	return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }

    case GIMPLE_TERNARY_RHS:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree op0 = gimple_assign_rhs1 (stmt);
	tree op1 = gimple_assign_rhs2 (stmt);
	tree op2 = gimple_assign_rhs3 (stmt);

	/* Sadly, we have to handle conditional assignments specially
	   here, because fold expects all the operands of an expression
	   to be folded before the expression itself is folded, but we
	   can't just substitute the folded condition here.  */
	if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	  op0 = fold (op0);

	return fold_ternary (subcode, TREE_TYPE (lhs), op0, op1, op2);
      }

    default:
      gcc_unreachable ();
    }
}

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
						   vec<tree> *stack,
						   tree (*simplify) (gimple,
								     gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 can not thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      /* At this point we have a statement which assigns an RHS to an
	 SSA_VAR on the LHS.  We want to try and simplify this statement
	 to expose more context sensitive equivalences which in turn may
	 allow us to simplify the condition at the end of the loop.

	 Handle simple copy operations as well as implied copies from
	 ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
	cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
	       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
	{
	  /* A statement that is not a trivial copy or ASSERT_EXPR.
	     We're going to temporarily copy propagate the operands
	     and see if that allows us to simplify this statement.  */
	  tree *copy;
	  ssa_op_iter iter;
	  use_operand_p use_p;
	  unsigned int num, i = 0;

	  num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
	  copy = XCNEWVEC (tree, num);

	  /* Make a copy of the uses & vuses into USES_COPY, then cprop into
	     the operands.  */
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    {
	      tree tmp = NULL;
	      tree use = USE_FROM_PTR (use_p);

	      copy[i++] = use;
	      if (TREE_CODE (use) == SSA_NAME)
		tmp = SSA_NAME_VALUE (use);
	      if (tmp)
		SET_USE (use_p, tmp);
	    }

	  /* Try to fold/lookup the new expression.  Inserting the
	     expression into the hash table is unlikely to help.  */
	  if (is_gimple_call (stmt))
	    cached_lhs = fold_call_stmt (stmt, false);
	  else
	    cached_lhs = fold_assignment_stmt (stmt);

	  if (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs)))
	    cached_lhs = (*simplify) (stmt, stmt);

	  /* Restore the statement's original uses/defs.  */
	  i = 0;
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
	    SET_USE (use_p, copy[i++]);

	  free (copy);
	}

      /* Record the context sensitive equivalence if we were able
	 to simplify this statement.  */
      if (cached_lhs
	  && (TREE_CODE (cached_lhs) == SSA_NAME
	      || is_gimple_min_invariant (cached_lhs)))
	record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}
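
/* For illustration, with hypothetical GIMPLE: suppose the PHIs above
   recorded x_3 == 0.  Walking the statements of E->dest and copy
   propagating the temporary values, an assignment such as

       y_6 = x_3 + 7;

   temporarily becomes 0 + 7, which folds to 7, so the additional
   context sensitive equivalence y_6 == 7 is recorded.  The original
   operands of the statement are restored afterwards; only the
   SSA_NAME value table is (temporarily) updated.  */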

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
				 gimple stmt,
				 gimple dummy_cond,
				 tree (*simplify) (gimple, gimple),
				 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op0);
	  if (tmp)
	    op0 = tmp;
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  tree tmp = SSA_NAME_VALUE (op1);
	  if (tmp)
	    op1 = tmp;
	}

      if (handle_dominating_asserts)
	{
	  /* Now see if the operand was consumed by an ASSERT_EXPR
	     which dominates E->src.  If so, we want to replace the
	     operand with the LHS of the ASSERT_EXPR.  */
	  if (TREE_CODE (op0) == SSA_NAME)
	    op0 = lhs_of_dominating_assert (op0, e->src, stmt);

	  if (TREE_CODE (op1) == SSA_NAME)
	    op1 = lhs_of_dominating_assert (op1, e->src, stmt);
	}

      /* We may need to canonicalize the comparison.  For
	 example, op0 might be a constant while op1 is an
	 SSA_NAME.  Failure to canonicalize will cause us to
	 miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
	{
	  tree tmp;
	  cond_code = swap_tree_comparison (cond_code);
	  tmp = op0;
	  op0 = op1;
	  op1 = tmp;
	}

      /* Stuff the operator and operands into our dummy conditional
	 expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);

      /* We absolutely do not care about any type conversions; we only
	 care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
	while (CONVERT_EXPR_P (cached_lhs))
	  cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
				       && is_gimple_min_invariant (cached_lhs)),
				      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
	 then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
	  || !is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
	  && TREE_CODE (cached_lhs) == SSA_NAME
	  && SSA_NAME_VALUE (cached_lhs))
	cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
	 update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
	cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
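
/* For illustration, continuing the hypothetical example above: with
   y_6 == 7 recorded, a block ending in

       if (y_6 != 7) goto <bb 8>; else goto <bb 9>;

   has its operands replaced by their current values, the comparison
   7 != 7 folds to false, and the caller learns statically which
   outgoing edge is taken along the threaded path.  */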

/* Return TRUE if the statement at the end of e->dest depends on
   the output of any statement in BB.   Otherwise return FALSE.

   This is used when we are threading a backedge and need to ensure
   that temporary equivalences from BB do not affect the condition
   in e->dest.  */

static bool
cond_arg_set_in_bb (edge e, basic_block bb)
{
  ssa_op_iter iter;
  use_operand_p use_p;
  gimple last = last_stmt (e->dest);

  /* E->dest does not have to end with a control transferring
     instruction.  This can occur when we try to extend a jump
     threading opportunity deeper into the CFG.  In that case
     it is safe for this check to return false.  */
  if (!last)
    return false;

  if (gimple_code (last) != GIMPLE_COND
      && gimple_code (last) != GIMPLE_GOTO
      && gimple_code (last) != GIMPLE_SWITCH)
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (TREE_CODE (use) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (use)) == bb)
	return true;
    }
  return false;
}
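
/* For illustration, with hypothetical GIMPLE: if E->dest ends in

       if (i_8 < n_5) goto <bb 3>; else goto <bb 7>;

   and i_8 is defined by a (non-PHI) statement in BB, then equivalences
   recorded from BB feed directly into this condition.  When E is a
   loop backedge that is exactly the situation which could
   short-circuit the loop, so the callers below refuse to thread in
   that case.  */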

/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      i++;
    }

  stack_vec<tree, alloc_count> fewvars;
  pointer_set_t *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     pointer_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = pointer_set_create ();

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else
	gcc_unreachable ();

      if (vars)
	pointer_set_insert (vars, var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  ??? Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts, which
	     might be a problem once we introduce stmt frontier notes
	     or somesuch.  Adding `&& bb == src' to the condition
	     below will preserve all potentially relevant debug
	     notes.  */
	  if (vars && pointer_set_insert (vars, var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;

	      if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  vars = pointer_set_create ();
		  for (i = 0; i < alloc_count; i++)
		    pointer_set_insert (vars, fewvars[i]);
		  fewvars.release ();
		  pointer_set_insert (vars, var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings? */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    pointer_set_destroy (vars);
  else if (fewvars.exists ())
    fewvars.release ();
}

/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return FALSE.

   DUMMY_COND, HANDLE_DOMINATING_ASSERTS and SIMPLIFY are used to
   try and simplify the condition at the end of TAKEN_EDGE->dest.  */
static bool
thread_around_empty_blocks (edge taken_edge,
			    gimple dummy_cond,
			    bool handle_dominating_asserts,
			    tree (*simplify) (gimple, gimple),
			    bitmap visited,
			    vec<jump_thread_edge *> *path)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple stmt;
  tree cond;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they can not have visible side effects such
     as PHI nodes.  */
  if (!gsi_end_p (gsi_start_phis (bb)))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);
	  if ((taken_edge->flags & EDGE_DFS_BACK) == 0
	      && !bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      jump_thread_edge *x
		= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      path->safe_push (x);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (taken_edge,
						 dummy_cond,
						 handle_dominating_asserts,
						 simplify,
						 visited,
						 path);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statement this block can have is a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt, dummy_cond,
					  simplify, handle_dominating_asserts);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond && is_gimple_min_invariant (cond))
    {
      taken_edge = find_taken_edge (bb, cond);

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      jump_thread_edge *x
	= new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      path->safe_push (x);

      thread_around_empty_blocks (taken_edge,
				  dummy_cond,
				  handle_dominating_asserts,
				  simplify,
				  visited,
				  path);
      return true;
    }

  return false;
}
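
/* For illustration: a chain such as

       <bb 4>:  (no PHIs, no statements, single successor <bb 5>)
       <bb 5>:  if (x_2 > 0) ...   (x_2 known along the threaded path)

   can be walked by the recursion above without copying either block;
   only EDGE_NO_COPY_SRC_BLOCK entries are appended to PATH.  The walk
   reports success only once a conditional is actually resolved, so
   merely skipping empty forwarding blocks never justifies a thread by
   itself.  */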

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.

   Our caller is responsible for restoring the state of the expression
   and const_and_copies stacks.  */

static bool
thread_through_normal_block (edge e,
			     gimple dummy_cond,
			     bool handle_dominating_asserts,
			     vec<tree> *stack,
			     tree (*simplify) (gimple, gimple),
			     vec<jump_thread_edge *> *path,
			     bitmap visited)
{
  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      if (cond_arg_set_in_bb (e, e->dest))
	return false;
    }

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    return false;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple stmt
    = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    return false;

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
					      handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
	{
	  edge taken_edge = find_taken_edge (e->dest, cond);
	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL || dest == e->dest || bitmap_bit_p (visited, dest->index))
	    return false;

	  jump_thread_edge *x
	    = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	  path->safe_push (x);

	  x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_BLOCK);
	  path->safe_push (x);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.  */
	  if ((e->flags & EDGE_DFS_BACK) == 0
	      || ! cond_arg_set_in_bb (taken_edge, e->dest))
	    {
	      /* We don't want to thread back to a block we have already
		 visited.  This may be overly conservative.  */
	      bitmap_set_bit (visited, dest->index);
	      bitmap_set_bit (visited, e->dest->index);
	      thread_around_empty_blocks (taken_edge,
					  dummy_cond,
					  handle_dominating_asserts,
					  simplify,
					  visited,
					  path);
	    }
	  return true;
	}
    }
  return false;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
		    edge e,
		    bool handle_dominating_asserts,
		    vec<tree> *stack,
		    tree (*simplify) (gimple, gimple))
{
  bitmap visited = BITMAP_ALLOC (NULL);

  stmt_count = 0;

  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
  bitmap_clear (visited);
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);
  if (thread_through_normal_block (e, dummy_cond, handle_dominating_asserts,
				   stack, simplify, path, visited))
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      remove_temporary_equivalences (stack);
      BITMAP_FREE (visited);
      register_jump_thread (path);
      return;
    }
  else
    {
      /* There should be no edges on the path, so no need to walk through
	 the vector entries.  */
      gcc_assert (path->length () == 0);
      path->release ();
    }

  /* We were unable to determine which outgoing edge from E->dest is taken.
     However, we might still be able to thread through successors of E->dest.
     This often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_ABNORMAL)
	{
	  remove_temporary_equivalences (stack);
	  BITMAP_FREE (visited);
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, taken_edge->dest->index);
	bitmap_set_bit (visited, e->dest->index);
	vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();

	/* Record whether or not we were able to thread through a successor
	   of E->dest.  */
	jump_thread_edge *x = new jump_thread_edge (e, EDGE_START_JUMP_THREAD);
	path->safe_push (x);

	x = new jump_thread_edge (taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
	path->safe_push (x);
	found = false;
	if ((e->flags & EDGE_DFS_BACK) == 0
	    || ! cond_arg_set_in_bb (path->last ()->e, e->dest))
	  found = thread_around_empty_blocks (taken_edge,
					      dummy_cond,
					      handle_dominating_asserts,
					      simplify,
					      visited,
					      path);

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found)
	  {
	    propagate_threaded_block_debug_into (path->last ()->e->dest,
						 taken_edge->dest);
	    register_jump_thread (path);
	  }
	else
	  {
	    for (unsigned int i = 0; i < path->length (); i++)
	      delete (*path)[i];
	    path->release ();
	  }
      }
    BITMAP_FREE (visited);
  }

  remove_temporary_equivalences (stack);
}
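
/* A rough sketch of how a client pass drives the interfaces above
   (illustrative only; the callback name and loop details here are
   hypothetical, and the real clients live in tree-ssa-dom.c and
   tree-vrp.c):

       gimple dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
					      integer_zero_node, NULL, NULL);
       vec<tree> stack = vNULL;

       threadedge_initialize_values ();
       FOR_EACH_BB (bb)
	 if (potentially_threadable_block (bb))
	   {
	     edge e;
	     edge_iterator ei;
	     FOR_EACH_EDGE (e, ei, bb->preds)
	       thread_across_edge (dummy_cond, e,
				   /*handle_dominating_asserts=*/false,
				   &stack, my_pass_simplify_callback);
	   }
       thread_through_all_blocks (false);
       threadedge_finalize_values ();
       stack.release ();

   thread_across_edge registers discovered paths via register_jump_thread;
   the CFG is only rewritten later by thread_through_all_blocks in
   tree-ssa-threadupdate.c.  */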