gcc/tree-ssa-threadedge.c (blob 37ee5c11be373d7affe5a7e0a3f0fdbd3968a2bc)
1 /* SSA Jump Threading
2 Copyright (C) 2005-2021 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "predict.h"
28 #include "ssa.h"
29 #include "fold-const.h"
30 #include "cfgloop.h"
31 #include "gimple-iterator.h"
32 #include "tree-cfg.h"
33 #include "tree-ssa-threadupdate.h"
34 #include "tree-ssa-scopedtables.h"
35 #include "tree-ssa-threadedge.h"
36 #include "tree-ssa-dom.h"
37 #include "gimple-fold.h"
38 #include "cfganal.h"
39 #include "alloc-pool.h"
40 #include "vr-values.h"
41 #include "gimple-ssa-evrp-analyze.h"
43 /* To avoid code explosion due to jump threading, we limit the
44 number of statements we are going to copy. This variable
45 holds the number of statements currently seen that we'll have
46 to copy as part of the jump threading process. */
47 static int stmt_count;
49 /* Array to record value-handles per SSA_NAME. */
50 vec<tree> ssa_name_values;
52 /* Set the value for the SSA name NAME to VALUE. */
54 void
55 set_ssa_name_value (tree name, tree value)
57 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
58 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1, true);
59 if (value && TREE_OVERFLOW_P (value))
60 value = drop_tree_overflow (value);
61 ssa_name_values[SSA_NAME_VERSION (name)] = value;
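/* Construct a jump threader which will use SIMPLIFIER to simplify
   statements with pass-specific information and STATE to record and
   unwind the temporary equivalences created along each path.  */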
64 jump_threader::jump_threader (jump_threader_simplifier *simplifier,
65 jt_state *state)
67 /* Initialize the per SSA_NAME value-handles array. */
68 gcc_assert (!ssa_name_values.exists ());
69 ssa_name_values.create (num_ssa_names);
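/* A scratch GIMPLE_COND that simplify_control_stmt_condition_1 reuses:
   its code and operands are replaced before each query to the
   simplifier.  */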
71 dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
72 integer_zero_node, NULL, NULL);
74 m_registry = new jump_thread_path_registry ();
75 m_simplifier = simplifier;
76 m_state = state;
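/* Release the shared SSA_NAME value array, the scratch condition and
   the thread path registry allocated by the constructor.  */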
79 jump_threader::~jump_threader (void)
81 ssa_name_values.release ();
82 ggc_free (dummy_cond);
83 delete m_registry;
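/* Remove from the registry any recorded jump thread paths that
   include edge E.  */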
86 void
87 jump_threader::remove_jump_threads_including (edge_def *e)
89 m_registry->remove_jump_threads_including (e);
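/* Realize all the jump threads recorded in the registry, possibly
   peeling loop headers if MAY_PEEL_LOOP_HEADERS allows it.  The
   return value is whatever the registry reports (true when threading
   changed the CFG).  */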
92 bool
93 jump_threader::thread_through_all_blocks (bool may_peel_loop_headers)
95 return m_registry->thread_through_all_blocks (may_peel_loop_headers);
98 /* Return TRUE if we may be able to thread an incoming edge into
99 BB to an outgoing edge from BB. Return FALSE otherwise. */
101 static bool
102 potentially_threadable_block (basic_block bb)
104 gimple_stmt_iterator gsi;
106 /* Special case. We can get blocks that are forwarders, but are
107 not optimized away because they forward from outside a loop
108 to the loop header. We want to thread through them as we can
109 sometimes thread to the loop exit, which is obviously profitable.
110 The interesting case here is when the block has PHIs. */
111 if (gsi_end_p (gsi_start_nondebug_bb (bb))
112 && !gsi_end_p (gsi_start_phis (bb)))
113 return true;
115 /* If BB has a single successor or a single predecessor, then
116 there is no threading opportunity. */
117 if (single_succ_p (bb) || single_pred_p (bb))
118 return false;
120 /* If BB does not end with a conditional, switch or computed goto,
121 then there is no threading opportunity. */
122 gsi = gsi_last_bb (bb);
123 if (gsi_end_p (gsi)
124 || ! gsi_stmt (gsi)
125 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
126 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
127 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
128 return false;
130 return true;
133 /* Record temporary equivalences created by PHIs at the target of the
134 edge E.
136 If a PHI which prevents threading is encountered, then return FALSE
137 indicating we should not thread this edge, else return TRUE. */
139 bool
140 jump_threader::record_temporary_equivalences_from_phis (edge e)
142 gphi_iterator gsi;
144 /* Each PHI creates a temporary equivalence; record them.
145 These are context sensitive equivalences and will be removed
146 later. */
147 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
149 gphi *phi = gsi.phi ();
150 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
151 tree dst = gimple_phi_result (phi);
153 /* If the desired argument is not the same as this PHI's result
154 and it is set by a PHI in E->dest, then we cannot thread
155 through E->dest. */
156 if (src != dst
157 && TREE_CODE (src) == SSA_NAME
158 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
159 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
160 return false;
162 /* We consider any non-virtual PHI as a statement since it
163 could result in a constant assignment or copy operation. */
164 if (!virtual_operand_p (dst))
165 stmt_count++;
167 m_state->register_equiv (dst, src, /*update_range=*/true);
169 return true;
172 /* Valueize hook for gimple_fold_stmt_to_constant_1. */
174 static tree
175 threadedge_valueize (tree t)
177 if (TREE_CODE (t) == SSA_NAME)
179 tree tem = SSA_NAME_VALUE (t);
180 if (tem)
181 return tem;
183 return t;
186 /* Try to simplify each statement in E->dest, ultimately leading to
187 a simplification of the COND_EXPR at the end of E->dest.
189 Record unwind information for temporary equivalences onto STACK.
191 Uses M_SIMPLIFIER to further simplify statements using pass specific
192 information.
194 We might consider marking just those statements which ultimately
195 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
196 would be recovered by trying to simplify fewer statements.
198 If we are able to simplify a statement into the form
199 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
200 a context sensitive equivalence which may help us simplify
201 later statements in E->dest. */
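/* For instance (with purely illustrative SSA names): if traversing E
   tells us x_1 == 0 and E->dest contains the copy y_2 = x_1, we
   temporarily record that y_2 is equivalent to x_1, so that chasing
   these equivalences can later collapse a test such as
   if (y_2 != 0) to a constant.  */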
203 gimple *
204 jump_threader::record_temporary_equivalences_from_stmts_at_dest (edge e)
206 gimple *stmt = NULL;
207 gimple_stmt_iterator gsi;
208 int max_stmt_count;
210 max_stmt_count = param_max_jump_thread_duplication_stmts;
212 /* Walk through each statement in the block recording equivalences
213 we discover. Note any equivalences we discover are context
214 sensitive (ie, are dependent on traversing E) and must be unwound
215 when we're finished processing E. */
216 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
218 tree cached_lhs = NULL;
220 stmt = gsi_stmt (gsi);
222 /* Ignore empty statements and labels. */
223 if (gimple_code (stmt) == GIMPLE_NOP
224 || gimple_code (stmt) == GIMPLE_LABEL
225 || is_gimple_debug (stmt))
226 continue;
228 /* If the statement has volatile operands, then we assume we
229 cannot thread through this block. This is overly
230 conservative in some ways. */
231 if (gimple_code (stmt) == GIMPLE_ASM
232 && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
233 return NULL;
235 /* If the statement is a unique builtin, we cannot thread
236 through here. */
237 if (gimple_code (stmt) == GIMPLE_CALL
238 && gimple_call_internal_p (stmt)
239 && gimple_call_internal_unique_p (stmt))
240 return NULL;
242 /* We cannot thread through __builtin_constant_p, because an
243 expression that is constant on two threading paths may become
244 non-constant (i.e.: phi) when they merge. */
245 if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P))
246 return NULL;
248 /* If duplicating this block is going to cause too much code
249 expansion, then do not thread through this block. */
250 stmt_count++;
251 if (stmt_count > max_stmt_count)
253 /* If any of the stmts in the PATH's dests are going to be
254 killed due to threading, grow the max count
255 accordingly. */
256 if (max_stmt_count
257 == param_max_jump_thread_duplication_stmts)
259 max_stmt_count += estimate_threading_killed_stmts (e->dest);
260 if (dump_file)
261 fprintf (dump_file, "threading bb %i up to %i stmts\n",
262 e->dest->index, max_stmt_count);
264 /* If we're still past the limit, we're done. */
265 if (stmt_count > max_stmt_count)
266 return NULL;
269 m_state->record_ranges_from_stmt (stmt, true);
271 /* If this is not a statement that sets an SSA_NAME to a new
272 value, then do not try to simplify this statement as it will
273 not simplify in any way that is helpful for jump threading. */
274 if ((gimple_code (stmt) != GIMPLE_ASSIGN
275 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
276 && (gimple_code (stmt) != GIMPLE_CALL
277 || gimple_call_lhs (stmt) == NULL_TREE
278 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
279 continue;
281 /* The result of __builtin_object_size depends on all the arguments
282 of a phi node. Temporarily using only one edge produces invalid
283 results. For example
285 if (x < 6)
286 goto l;
287 else
288 goto l;
290 l:
291 r = PHI <&w[2].a[1](2), &a.a[6](3)>
292 __builtin_object_size (r, 0)
294 The result of __builtin_object_size is defined to be the maximum of
295 remaining bytes. If we use only one edge on the phi, the result will
296 change to be the remaining bytes for the corresponding phi argument.
298 Similarly for __builtin_constant_p:
300 r = PHI <1(2), 2(3)>
301 __builtin_constant_p (r)
303 Both PHI arguments are constant, but x ? 1 : 2 is still not
304 constant. */
306 if (is_gimple_call (stmt))
308 tree fndecl = gimple_call_fndecl (stmt);
309 if (fndecl
310 && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
311 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
312 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
313 continue;
316 /* At this point we have a statement which assigns an RHS to an
317 SSA_VAR on the LHS. We want to try and simplify this statement
318 to expose more context sensitive equivalences which in turn may
319 allow us to simplify the condition at the end of the loop.
321 Handle simple copy operations as well as implied copies from
322 ASSERT_EXPRs. */
323 if (gimple_assign_single_p (stmt)
324 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
325 cached_lhs = gimple_assign_rhs1 (stmt);
326 else if (gimple_assign_single_p (stmt)
327 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
328 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
329 else
331 /* A statement that is not a trivial copy or ASSERT_EXPR.
332 Try to fold the new expression. Inserting the
333 expression into the hash table is unlikely to help. */
334 /* ??? The DOM callback below can be changed to setting
335 the mprts_hook around the call to thread_across_edge,
336 avoiding the use substitution. The VRP hook should be
337 changed to properly valueize operands itself using
338 SSA_NAME_VALUE in addition to its own lattice. */
339 cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
340 threadedge_valueize);
341 if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES) != 0
342 && (!cached_lhs
343 || (TREE_CODE (cached_lhs) != SSA_NAME
344 && !is_gimple_min_invariant (cached_lhs))))
346 /* We're going to temporarily copy propagate the operands
347 and see if that allows us to simplify this statement. */
348 tree *copy;
349 ssa_op_iter iter;
350 use_operand_p use_p;
351 unsigned int num, i = 0;
353 num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
354 copy = XALLOCAVEC (tree, num);
356 /* Make a copy of the uses & vuses into COPY, then cprop into
357 the operands. */
358 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
360 tree tmp = NULL;
361 tree use = USE_FROM_PTR (use_p);
363 copy[i++] = use;
364 if (TREE_CODE (use) == SSA_NAME)
365 tmp = SSA_NAME_VALUE (use);
366 if (tmp)
367 SET_USE (use_p, tmp);
370 cached_lhs = m_simplifier->simplify (stmt, stmt, e->src, m_state);
372 /* Restore the statement's original uses/defs. */
373 i = 0;
374 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
375 SET_USE (use_p, copy[i++]);
379 /* Record the context sensitive equivalence if we were able
380 to simplify this statement. */
381 if (cached_lhs
382 && (TREE_CODE (cached_lhs) == SSA_NAME
383 || is_gimple_min_invariant (cached_lhs)))
384 m_state->register_equiv (gimple_get_lhs (stmt), cached_lhs);
386 return stmt;
389 /* Simplify the control statement at the end of the block E->dest.
391 Use SIMPLIFY (a pointer to a callback function) to further simplify
392 a condition using pass specific information.
394 Return the simplified condition or NULL if simplification could
395 not be performed. When simplifying a GIMPLE_SWITCH, we may return
396 the CASE_LABEL_EXPR that will be taken. */
398 tree
399 jump_threader::simplify_control_stmt_condition (edge e, gimple *stmt)
401 tree cond, cached_lhs;
402 enum gimple_code code = gimple_code (stmt);
404 /* For comparisons, we have to update both operands, then try
405 to simplify the comparison. */
406 if (code == GIMPLE_COND)
408 tree op0, op1;
409 enum tree_code cond_code;
411 op0 = gimple_cond_lhs (stmt);
412 op1 = gimple_cond_rhs (stmt);
413 cond_code = gimple_cond_code (stmt);
415 /* Get the current value of both operands. */
416 if (TREE_CODE (op0) == SSA_NAME)
418 for (int i = 0; i < 2; i++)
420 if (TREE_CODE (op0) == SSA_NAME
421 && SSA_NAME_VALUE (op0))
422 op0 = SSA_NAME_VALUE (op0);
423 else
424 break;
428 if (TREE_CODE (op1) == SSA_NAME)
430 for (int i = 0; i < 2; i++)
432 if (TREE_CODE (op1) == SSA_NAME
433 && SSA_NAME_VALUE (op1))
434 op1 = SSA_NAME_VALUE (op1);
435 else
436 break;
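/* Bound how deeply simplify_control_stmt_condition_1 will look
   through boolean combinations and comparisons feeding the
   condition.  */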
440 const unsigned recursion_limit = 4;
442 cached_lhs
443 = simplify_control_stmt_condition_1 (e, stmt, op0, cond_code, op1,
444 recursion_limit);
446 /* If we were testing an integer/pointer against a constant, then
447 we can use the FSM code to trace the value of the SSA_NAME. If
448 a value is found, then the condition will collapse to a constant.
450 Return the SSA_NAME we want to trace back rather than the full
451 expression and give the FSM threader a chance to find its value. */
452 if (cached_lhs == NULL)
454 /* Recover the original operands. They may have been simplified
455 using context sensitive equivalences. Those context sensitive
456 equivalences may not be valid on paths found by the FSM optimizer. */
457 tree op0 = gimple_cond_lhs (stmt);
458 tree op1 = gimple_cond_rhs (stmt);
460 if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
461 || POINTER_TYPE_P (TREE_TYPE (op0)))
462 && TREE_CODE (op0) == SSA_NAME
463 && TREE_CODE (op1) == INTEGER_CST)
464 return op0;
467 return cached_lhs;
470 if (code == GIMPLE_SWITCH)
471 cond = gimple_switch_index (as_a <gswitch *> (stmt));
472 else if (code == GIMPLE_GOTO)
473 cond = gimple_goto_dest (stmt);
474 else
475 gcc_unreachable ();
477 /* We can have conditionals which just test the state of a variable
478 rather than use a relational operator. These are simpler to handle. */
479 if (TREE_CODE (cond) == SSA_NAME)
481 tree original_lhs = cond;
482 cached_lhs = cond;
484 /* Get the variable's current value from the equivalence chains.
486 It is possible to get loops in the SSA_NAME_VALUE chains
487 (consider threading the backedge of a loop where we have
488 a loop invariant SSA_NAME used in the condition). */
489 if (cached_lhs)
491 for (int i = 0; i < 2; i++)
493 if (TREE_CODE (cached_lhs) == SSA_NAME
494 && SSA_NAME_VALUE (cached_lhs))
495 cached_lhs = SSA_NAME_VALUE (cached_lhs);
496 else
497 break;
501 /* If we haven't simplified to an invariant yet, then use the
502 pass specific callback to try and simplify it further. */
503 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
505 if (code == GIMPLE_SWITCH)
507 /* Replace the index operand of the GIMPLE_SWITCH with any LHS
508 we found before handing off to VRP. If simplification is
509 possible, the simplified value will be a CASE_LABEL_EXPR of
510 the label that is proven to be taken. */
511 gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
512 gimple_switch_set_index (dummy_switch, cached_lhs);
513 cached_lhs = m_simplifier->simplify (dummy_switch, stmt, e->src,
514 m_state);
515 ggc_free (dummy_switch);
517 else
518 cached_lhs = m_simplifier->simplify (stmt, stmt, e->src, m_state);
521 /* We couldn't find an invariant. But, callers of this
522 function may be able to do something useful with the
523 unmodified destination. */
524 if (!cached_lhs)
525 cached_lhs = original_lhs;
527 else
528 cached_lhs = NULL;
530 return cached_lhs;
533 /* Recursive helper for simplify_control_stmt_condition. */
535 tree
536 jump_threader::simplify_control_stmt_condition_1
537 (edge e,
538 gimple *stmt,
539 tree op0,
540 enum tree_code cond_code,
541 tree op1,
542 unsigned limit)
544 if (limit == 0)
545 return NULL_TREE;
547 /* We may need to canonicalize the comparison. For
548 example, op0 might be a constant while op1 is an
549 SSA_NAME. Failure to canonicalize will cause us to
550 miss threading opportunities. */
551 if (tree_swap_operands_p (op0, op1))
553 cond_code = swap_tree_comparison (cond_code);
554 std::swap (op0, op1);
557 /* If the condition has the form (A & B) CMP 0 or (A | B) CMP 0 then
558 recurse into the LHS to see if there is a dominating ASSERT_EXPR
559 of A or of B that makes this condition always true or always false
560 along the edge E. */
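/* For example (illustrative GIMPLE only):

     a_5 = ASSERT_EXPR <a_2, a_2 == 0>;
     x_6 = a_5 & b_3;
     if (x_6 != 0)

   knowing a_5 == 0 along E lets the recursion conclude the condition
   is always false without needing a value for b_3.  */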
561 if ((cond_code == EQ_EXPR || cond_code == NE_EXPR)
562 && TREE_CODE (op0) == SSA_NAME
563 && integer_zerop (op1))
565 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
566 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
568 else if (gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR
569 || gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
571 enum tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
572 const tree rhs1 = gimple_assign_rhs1 (def_stmt);
573 const tree rhs2 = gimple_assign_rhs2 (def_stmt);
575 /* Is A != 0 ? */
576 const tree res1
577 = simplify_control_stmt_condition_1 (e, def_stmt,
578 rhs1, NE_EXPR, op1,
579 limit - 1);
580 if (res1 == NULL_TREE)
582 else if (rhs_code == BIT_AND_EXPR && integer_zerop (res1))
584 /* If A == 0 then (A & B) != 0 is always false. */
585 if (cond_code == NE_EXPR)
586 return boolean_false_node;
587 /* If A == 0 then (A & B) == 0 is always true. */
588 if (cond_code == EQ_EXPR)
589 return boolean_true_node;
591 else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res1))
593 /* If A != 0 then (A | B) != 0 is always true. */
594 if (cond_code == NE_EXPR)
595 return boolean_true_node;
596 /* If A != 0 then (A | B) == 0 is always false. */
597 if (cond_code == EQ_EXPR)
598 return boolean_false_node;
601 /* Is B != 0 ? */
602 const tree res2
603 = simplify_control_stmt_condition_1 (e, def_stmt,
604 rhs2, NE_EXPR, op1,
605 limit - 1);
606 if (res2 == NULL_TREE)
608 else if (rhs_code == BIT_AND_EXPR && integer_zerop (res2))
610 /* If B == 0 then (A & B) != 0 is always false. */
611 if (cond_code == NE_EXPR)
612 return boolean_false_node;
613 /* If B == 0 then (A & B) == 0 is always true. */
614 if (cond_code == EQ_EXPR)
615 return boolean_true_node;
617 else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res2))
619 /* If B != 0 then (A | B) != 0 is always true. */
620 if (cond_code == NE_EXPR)
621 return boolean_true_node;
622 /* If B != 0 then (A | B) == 0 is always false. */
623 if (cond_code == EQ_EXPR)
624 return boolean_false_node;
627 if (res1 != NULL_TREE && res2 != NULL_TREE)
629 if (rhs_code == BIT_AND_EXPR
630 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
631 && integer_nonzerop (res1)
632 && integer_nonzerop (res2))
634 /* If A != 0 and B != 0 then (bool)(A & B) != 0 is true. */
635 if (cond_code == NE_EXPR)
636 return boolean_true_node;
637 /* If A != 0 and B != 0 then (bool)(A & B) == 0 is false. */
638 if (cond_code == EQ_EXPR)
639 return boolean_false_node;
642 if (rhs_code == BIT_IOR_EXPR
643 && integer_zerop (res1)
644 && integer_zerop (res2))
646 /* If A == 0 and B == 0 then (A | B) != 0 is false. */
647 if (cond_code == NE_EXPR)
648 return boolean_false_node;
649 /* If A == 0 and B == 0 then (A | B) == 0 is true. */
650 if (cond_code == EQ_EXPR)
651 return boolean_true_node;
655 /* Handle (A CMP B) CMP 0. */
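/* E.g. for x_1 = a_2 < b_3; if (x_1 == 0), recurse on a_2 >= b_3,
   since testing the comparison's result against zero is the same as
   testing the (possibly inverted) comparison itself.  (Illustrative
   SSA names.)  */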
656 else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
657 == tcc_comparison)
659 tree rhs1 = gimple_assign_rhs1 (def_stmt);
660 tree rhs2 = gimple_assign_rhs2 (def_stmt);
662 tree_code new_cond = gimple_assign_rhs_code (def_stmt);
663 if (cond_code == EQ_EXPR)
664 new_cond = invert_tree_comparison (new_cond, false);
666 tree res
667 = simplify_control_stmt_condition_1 (e, def_stmt,
668 rhs1, new_cond, rhs2,
669 limit - 1);
670 if (res != NULL_TREE && is_gimple_min_invariant (res))
671 return res;
675 gimple_cond_set_code (dummy_cond, cond_code);
676 gimple_cond_set_lhs (dummy_cond, op0);
677 gimple_cond_set_rhs (dummy_cond, op1);
679 /* We absolutely do not care about any type conversions;
680 we only care about a zero/nonzero value. */
681 fold_defer_overflow_warnings ();
683 tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
684 if (res)
685 while (CONVERT_EXPR_P (res))
686 res = TREE_OPERAND (res, 0);
688 fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
689 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
691 /* If we have not simplified the condition down to an invariant,
692 then use the pass specific callback to simplify the condition. */
693 if (!res
694 || !is_gimple_min_invariant (res))
695 res = m_simplifier->simplify (dummy_cond, stmt, e->src, m_state);
697 return res;
700 /* Copy debug stmts from DEST's chain of single predecessors up to
701 SRC, so that we don't lose the bindings as PHI nodes are introduced
702 when DEST gains new predecessors. */
703 void
704 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
706 if (!MAY_HAVE_DEBUG_BIND_STMTS)
707 return;
709 if (!single_pred_p (dest))
710 return;
712 gcc_checking_assert (dest != src);
714 gimple_stmt_iterator gsi = gsi_after_labels (dest);
715 int i = 0;
716 const int alloc_count = 16; // ?? Should this be a PARAM?
718 /* Estimate the number of debug vars overridden in the beginning of
719 DEST, to tell how many we're going to need to begin with. */
720 for (gimple_stmt_iterator si = gsi;
721 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
723 gimple *stmt = gsi_stmt (si);
724 if (!is_gimple_debug (stmt))
725 break;
726 if (gimple_debug_nonbind_marker_p (stmt))
727 continue;
728 i++;
731 auto_vec<tree, alloc_count> fewvars;
732 hash_set<tree> *vars = NULL;
734 /* If we're already starting with 3/4 of alloc_count, go for a
735 hash_set, otherwise start with an unordered stack-allocated
736 VEC. */
737 if (i * 4 > alloc_count * 3)
738 vars = new hash_set<tree>;
740 /* Now go through the initial debug stmts in DEST again, this time
741 actually inserting in VARS or FEWVARS. Don't bother checking for
742 duplicates in FEWVARS. */
743 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
745 gimple *stmt = gsi_stmt (si);
746 if (!is_gimple_debug (stmt))
747 break;
749 tree var;
751 if (gimple_debug_bind_p (stmt))
752 var = gimple_debug_bind_get_var (stmt);
753 else if (gimple_debug_source_bind_p (stmt))
754 var = gimple_debug_source_bind_get_var (stmt);
755 else if (gimple_debug_nonbind_marker_p (stmt))
756 continue;
757 else
758 gcc_unreachable ();
760 if (vars)
761 vars->add (var);
762 else
763 fewvars.quick_push (var);
766 basic_block bb = dest;
770 bb = single_pred (bb);
771 for (gimple_stmt_iterator si = gsi_last_bb (bb);
772 !gsi_end_p (si); gsi_prev (&si))
774 gimple *stmt = gsi_stmt (si);
775 if (!is_gimple_debug (stmt))
776 continue;
778 tree var;
780 if (gimple_debug_bind_p (stmt))
781 var = gimple_debug_bind_get_var (stmt);
782 else if (gimple_debug_source_bind_p (stmt))
783 var = gimple_debug_source_bind_get_var (stmt);
784 else if (gimple_debug_nonbind_marker_p (stmt))
785 continue;
786 else
787 gcc_unreachable ();
789 /* Discard debug bind overlaps. Unlike stmts from src,
790 copied into a new block that will precede BB, debug bind
791 stmts in bypassed BBs may actually be discarded if
792 they're overwritten by subsequent debug bind stmts. We
793 want to copy binds for all modified variables, so that we
794 retain a bind to the shared def if there is one, or to a
795 newly introduced PHI node if there is one. Our bind will
796 end up reset if the value is dead, but that implies the
797 variable couldn't have survived, so it's fine. We are
798 not actually running the code that performed the binds at
799 this point, we're just adding binds so that they survive
800 the new confluence, so markers should not be copied. */
801 if (vars && vars->add (var))
802 continue;
803 else if (!vars)
805 int i = fewvars.length ();
806 while (i--)
807 if (fewvars[i] == var)
808 break;
809 if (i >= 0)
810 continue;
811 else if (fewvars.length () < (unsigned) alloc_count)
812 fewvars.quick_push (var);
813 else
815 vars = new hash_set<tree>;
816 for (i = 0; i < alloc_count; i++)
817 vars->add (fewvars[i]);
818 fewvars.release ();
819 vars->add (var);
823 stmt = gimple_copy (stmt);
824 /* ??? Should we drop the location of the copy to denote
825 they're artificial bindings? */
826 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
829 while (bb != src && single_pred_p (bb));
831 if (vars)
832 delete vars;
833 else if (fewvars.exists ())
834 fewvars.release ();
837 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (i.e., it
838 need not be duplicated as part of the CFG/SSA updating process).
840 If it is threadable, add it to PATH and VISITED and recurse, ultimately
841 returning TRUE from the toplevel call. Otherwise do nothing and
842 return false. */
844 bool
845 jump_threader::thread_around_empty_blocks (vec<jump_thread_edge *> *path,
846 edge taken_edge,
847 bitmap visited)
849 basic_block bb = taken_edge->dest;
850 gimple_stmt_iterator gsi;
851 gimple *stmt;
852 tree cond;
854 /* The key property of these blocks is that they need not be duplicated
855 when threading. Thus they cannot have visible side effects such
856 as PHI nodes. */
857 if (!gsi_end_p (gsi_start_phis (bb)))
858 return false;
860 /* Skip over DEBUG statements at the start of the block. */
861 gsi = gsi_start_nondebug_bb (bb);
863 /* If the block has no statements, but does have a single successor, then
864 it's just a forwarding block and we can thread through it trivially.
866 However, note that just threading through empty blocks with single
867 successors is not inherently profitable. For the jump thread to
868 be profitable, we must avoid a runtime conditional.
870 By taking the return value from the recursive call, we get the
871 desired effect of returning TRUE when we found a profitable jump
872 threading opportunity and FALSE otherwise.
874 This is particularly important when this routine is called after
875 processing a joiner block. Returning TRUE too aggressively in
876 that case results in pointless duplication of the joiner block. */
877 if (gsi_end_p (gsi))
879 if (single_succ_p (bb))
881 taken_edge = single_succ_edge (bb);
883 if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
884 return false;
886 if (!bitmap_bit_p (visited, taken_edge->dest->index))
888 jump_thread_edge *x
889 = m_registry->allocate_thread_edge (taken_edge,
890 EDGE_NO_COPY_SRC_BLOCK);
891 path->safe_push (x);
892 bitmap_set_bit (visited, taken_edge->dest->index);
893 return thread_around_empty_blocks (path, taken_edge, visited);
897 /* We have a block with no statements, but multiple successors? */
898 return false;
901 /* The only real statement this block can have is a control
902 flow altering statement. Anything else stops the thread. */
903 stmt = gsi_stmt (gsi);
904 if (gimple_code (stmt) != GIMPLE_COND
905 && gimple_code (stmt) != GIMPLE_GOTO
906 && gimple_code (stmt) != GIMPLE_SWITCH)
907 return false;
909 /* Extract and simplify the condition. */
910 cond = simplify_control_stmt_condition (taken_edge, stmt);
912 /* If the condition can be statically computed and we have not already
913 visited the destination edge, then add the taken edge to our thread
914 path. */
915 if (cond != NULL_TREE
916 && (is_gimple_min_invariant (cond)
917 || TREE_CODE (cond) == CASE_LABEL_EXPR))
919 if (TREE_CODE (cond) == CASE_LABEL_EXPR)
920 taken_edge = find_edge (bb, label_to_block (cfun, CASE_LABEL (cond)));
921 else
922 taken_edge = find_taken_edge (bb, cond);
924 if (!taken_edge
925 || (taken_edge->flags & EDGE_DFS_BACK) != 0)
926 return false;
928 if (bitmap_bit_p (visited, taken_edge->dest->index))
929 return false;
930 bitmap_set_bit (visited, taken_edge->dest->index);
932 jump_thread_edge *x
933 = m_registry->allocate_thread_edge (taken_edge,
934 EDGE_NO_COPY_SRC_BLOCK);
935 path->safe_push (x);
937 thread_around_empty_blocks (path, taken_edge, visited);
938 return true;
941 return false;
944 /* We are exiting E->src, see if E->dest ends with a conditional
945 jump which has a known value when reached via E.
947 E->dest can have arbitrary side effects which, if threading is
948 successful, will be maintained.
950 Special care is necessary if E is a back edge in the CFG as we
951 may have already recorded equivalences for E->dest into our
952 various tables, including the result of the conditional at
953 the end of E->dest. Threading opportunities are severely
954 limited in that case to avoid short-circuiting the loop
955 incorrectly.
957 Positive return value is success. Zero return value is failure, but
958 the block can still be duplicated as a joiner in a jump thread path,
959 negative indicates the block should not be duplicated and thus is not
960 suitable for a joiner in a jump threading path. */
962 int
963 jump_threader::thread_through_normal_block (vec<jump_thread_edge *> *path,
964 edge e, bitmap visited)
966 m_state->register_equivs_on_edge (e);
968 /* PHIs create temporary equivalences.
969 Note that if we found a PHI that made the block non-threadable, then
970 we need to bubble that up to our caller in the same manner we do
971 when we prematurely stop processing statements below. */
972 if (!record_temporary_equivalences_from_phis (e))
973 return -1;
975 /* Now walk each statement recording any context sensitive
976 temporary equivalences we can detect. */
977 gimple *stmt = record_temporary_equivalences_from_stmts_at_dest (e);
979 /* There are two reasons STMT might be null, and distinguishing
980 between them is important.
982 First the block may not have had any statements. For example, it
983 might have some PHIs and unconditionally transfer control elsewhere.
984 Such blocks are suitable for jump threading, particularly as a
985 joiner block.
987 The second reason would be if we did not process all the statements
988 in the block (because there were too many to make duplicating the
989 block profitable). If we did not look at all the statements, then
990 we may not have invalidated everything needing invalidation. Thus
991 we must signal to our caller that this block is not suitable for
992 use as a joiner in a threading path. */
993 if (!stmt)
995 /* First case. The block simply doesn't have any statements, but
996 does have PHIs. */
997 if (gsi_end_p (gsi_start_nondebug_bb (e->dest))
998 && !gsi_end_p (gsi_start_phis (e->dest)))
999 return 0;
1001 /* Second case. */
1002 return -1;
1005 /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
1006 will be taken. */
1007 if (gimple_code (stmt) == GIMPLE_COND
1008 || gimple_code (stmt) == GIMPLE_GOTO
1009 || gimple_code (stmt) == GIMPLE_SWITCH)
1011 tree cond;
1013 /* Extract and simplify the condition. */
1014 cond = simplify_control_stmt_condition (e, stmt);
1016 if (!cond)
1017 return 0;
1019 if (is_gimple_min_invariant (cond)
1020 || TREE_CODE (cond) == CASE_LABEL_EXPR)
1022 edge taken_edge;
1023 if (TREE_CODE (cond) == CASE_LABEL_EXPR)
1024 taken_edge = find_edge (e->dest,
1025 label_to_block (cfun, CASE_LABEL (cond)));
1026 else
1027 taken_edge = find_taken_edge (e->dest, cond);
1029 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
1031 /* DEST could be NULL for a computed jump to an absolute
1032 address. */
1033 if (dest == NULL
1034 || dest == e->dest
1035 || (taken_edge->flags & EDGE_DFS_BACK) != 0
1036 || bitmap_bit_p (visited, dest->index))
1037 return 0;
1039 /* Only push the EDGE_START_JUMP_THREAD marker if this is the
1040 first edge on the path. */
1041 if (path->length () == 0)
1043 jump_thread_edge *x
1044 = m_registry->allocate_thread_edge (e, EDGE_START_JUMP_THREAD);
1045 path->safe_push (x);
1048 jump_thread_edge *x
1049 = m_registry->allocate_thread_edge (taken_edge,
1050 EDGE_COPY_SRC_BLOCK);
1051 path->safe_push (x);
1053 /* See if we can thread through DEST as well, this helps capture
1054 secondary effects of threading without having to re-run DOM or
1055 VRP.
1057 We don't want to thread back to a block we have already
1058 visited. This may be overly conservative. */
1059 bitmap_set_bit (visited, dest->index);
1060 bitmap_set_bit (visited, e->dest->index);
1061 thread_around_empty_blocks (path, taken_edge, visited);
1062 return 1;
1065 return 0;
1068 /* There are basic blocks that look like:
1069 <P0>
1070 p0 = a CMP b ; or p0 = (INT) (a CMP b)
1071 goto <X>;
1073 <P1>
1074 p1 = c CMP d
1075 goto <X>;
1077 <X>:
1078 # phi = PHI <p0 (P0), p1 (P1)>
1079 if (phi != 0) goto <Y>; else goto <Z>;
1081 Then edge (P0,X) or (P1,X) could be marked as EDGE_START_JUMP_THREAD,
1082 and edge (X,Y) or (X,Z) as EDGE_COPY_SRC_JOINER_BLOCK.
1084 Return true if E is (P0,X) or (P1,X). */
1086 bool
1087 edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
1089 /* See if the only statement in the block is a gcond. */
1090 gcond *gs;
1091 if (!(gs = safe_dyn_cast<gcond *> (last_and_only_stmt (e->dest))))
1092 return false;
1094 /* See if gcond's cond is "(phi !=/== 0/1)" in the basic block. */
1095 tree cond = gimple_cond_lhs (gs);
1096 enum tree_code code = gimple_cond_code (gs);
1097 tree rhs = gimple_cond_rhs (gs);
1098 if (TREE_CODE (cond) != SSA_NAME
1099 || (code != NE_EXPR && code != EQ_EXPR)
1100 || (!integer_onep (rhs) && !integer_zerop (rhs)))
1101 return false;
1102 gphi *phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (cond));
1103 if (phi == NULL || gimple_bb (phi) != e->dest)
1104 return false;
1106 /* Check if phi's incoming value is CMP. */
1107 gassign *def;
1108 tree value = PHI_ARG_DEF_FROM_EDGE (phi, e);
1109 if (TREE_CODE (value) != SSA_NAME
1110 || !has_single_use (value)
1111 || !(def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (value))))
1112 return false;
1114 /* Or if it is (INT) (a CMP b). */
1115 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
1117 value = gimple_assign_rhs1 (def);
1118 if (TREE_CODE (value) != SSA_NAME
1119 || !has_single_use (value)
1120 || !(def = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (value))))
1121 return false;
1124 if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
1125 return false;
1127 return true;
1130 /* We are exiting E->src, see if E->dest ends with a conditional jump
1131 which has a known value when reached via E. If so, thread the
1132 edge. */
1134 void
1135 jump_threader::thread_across_edge (edge e)
1137 bitmap visited = BITMAP_ALLOC (NULL);
1139 m_state->push (e);
1141 stmt_count = 0;
1143 vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
1144 bitmap_clear (visited);
1145 bitmap_set_bit (visited, e->src->index);
1146 bitmap_set_bit (visited, e->dest->index);
1148 int threaded;
1149 if ((e->flags & EDGE_DFS_BACK) == 0)
1150 threaded = thread_through_normal_block (path, e, visited);
1151 else
1152 threaded = 0;
1154 if (threaded > 0)
1156 propagate_threaded_block_debug_into (path->last ()->e->dest,
1157 e->dest);
1158 m_state->pop ();
1159 BITMAP_FREE (visited);
1160 m_registry->register_jump_thread (path);
1161 return;
1163 else
1165 /* Negative and zero return values indicate no threading was possible,
1166 thus there should be no edges on the thread path and no need to walk
1167 through the vector entries. */
1168 gcc_assert (path->length () == 0);
1169 path->release ();
1171 /* A negative status indicates the target block was deemed too big to
1172 duplicate. Just quit now rather than trying to use the block as
1173 a joiner in a jump threading path.
1175 This prevents unnecessary code growth, but more importantly if we
1176 do not look at all the statements in the block, then we may have
1177 missed some invalidations if we had traversed a backedge! */
1178 if (threaded < 0)
1180 BITMAP_FREE (visited);
1181 m_state->pop ();
1182 return;
1186 /* We were unable to determine what out edge from E->dest is taken. However,
1187 we might still be able to thread through successors of E->dest. This
1188 often occurs when E->dest is a joiner block which then fans back out
1189 based on redundant tests.
1191 If so, we'll copy E->dest and redirect the appropriate predecessor to
1192 the copy. Within the copy of E->dest, we'll thread one or more edges
1193 to points deeper in the CFG.
1195 This is a stopgap until we have a more structured approach to path
1196 isolation. */
1198 edge taken_edge;
1199 edge_iterator ei;
1200 bool found;
1202 /* If E->dest has abnormal outgoing edges, then there's no guarantee
1203 we can safely redirect any of the edges. Just punt those cases. */
1204 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1205 if (taken_edge->flags & EDGE_COMPLEX)
1207 m_state->pop ();
1208 BITMAP_FREE (visited);
1209 return;
1212 /* Look at each successor of E->dest to see if we can thread through it. */
1213 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1215 if ((e->flags & EDGE_DFS_BACK) != 0
1216 || (taken_edge->flags & EDGE_DFS_BACK) != 0)
1217 continue;
1219 m_state->push (taken_edge);
1221 /* Avoid threading to any block we have already visited. */
1222 bitmap_clear (visited);
1223 bitmap_set_bit (visited, e->src->index);
1224 bitmap_set_bit (visited, e->dest->index);
1225 bitmap_set_bit (visited, taken_edge->dest->index);
1226 vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
1228 /* Record whether or not we were able to thread through a successor
1229 of E->dest. */
1230 jump_thread_edge *x
1231 = m_registry->allocate_thread_edge (e, EDGE_START_JUMP_THREAD);
1232 path->safe_push (x);
1234 x = m_registry->allocate_thread_edge (taken_edge,
1235 EDGE_COPY_SRC_JOINER_BLOCK);
1236 path->safe_push (x);
1237 found = thread_around_empty_blocks (path, taken_edge, visited);
1239 if (!found)
1240 found = thread_through_normal_block (path,
1241 path->last ()->e, visited) > 0;
1243 /* If we were able to thread through a successor of E->dest, then
1244 record the jump threading opportunity. */
1245 if (found
1246 || edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (e))
1248 if (taken_edge->dest != path->last ()->e->dest)
1249 propagate_threaded_block_debug_into (path->last ()->e->dest,
1250 taken_edge->dest);
1251 m_registry->register_jump_thread (path);
1253 else
1254 path->release ();
1256 m_state->pop ();
1258 BITMAP_FREE (visited);
1261 m_state->pop ();
1264 /* Return TRUE if BB has a single successor to a block with multiple
1265 incoming and outgoing edges. */
1267 bool
1268 single_succ_to_potentially_threadable_block (basic_block bb)
1270 int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
1271 return (single_succ_p (bb)
1272 && (single_succ_edge (bb)->flags & flags) == 0
1273 && potentially_threadable_block (single_succ (bb)));
1276 /* Examine the outgoing edges from BB and conditionally
1277 try to thread them. */
1279 void
1280 jump_threader::thread_outgoing_edges (basic_block bb)
1282 int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
1283 gimple *last;
1285 /* If we have an outgoing edge to a block with multiple incoming and
1286 outgoing edges, then we may be able to thread the edge, i.e., we
1287 may be able to statically determine which of the outgoing edges
1288 will be traversed when the incoming edge from BB is traversed. */
1289 if (single_succ_to_potentially_threadable_block (bb))
1290 thread_across_edge (single_succ_edge (bb));
1291 else if ((last = last_stmt (bb))
1292 && gimple_code (last) == GIMPLE_COND
1293 && EDGE_COUNT (bb->succs) == 2
1294 && (EDGE_SUCC (bb, 0)->flags & flags) == 0
1295 && (EDGE_SUCC (bb, 1)->flags & flags) == 0)
1297 edge true_edge, false_edge;
1299 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1301 /* Only try to thread the edge if it reaches a target block with
1302 more than one predecessor and more than one successor. */
1303 if (potentially_threadable_block (true_edge->dest))
1304 thread_across_edge (true_edge);
1306 /* Similarly for the ELSE arm. */
1307 if (potentially_threadable_block (false_edge->dest))
1308 thread_across_edge (false_edge);
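/* Construct pass-specific state for jump threading: COPIES and EXPRS
   record temporary equivalences, and EVRP (which may be NULL) tracks
   temporary value ranges.  */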
1312 jt_state::jt_state (const_and_copies *copies,
1313 avail_exprs_stack *exprs,
1314 evrp_range_analyzer *evrp)
1316 m_copies = copies;
1317 m_exprs = exprs;
1318 m_evrp = evrp;
1321 // Record that E is being crossed.
1323 void
1324 jt_state::push (edge)
1326 m_copies->push_marker ();
1327 m_exprs->push_marker ();
1328 if (m_evrp)
1329 m_evrp->push_marker ();
1332 // Pop to the last pushed state.
1334 void
1335 jt_state::pop ()
1337 m_copies->pop_to_marker ();
1338 m_exprs->pop_to_marker ();
1339 if (m_evrp)
1340 m_evrp->pop_to_marker ();
1343 // Record an equivalence from DST to SRC. If UPDATE_RANGE is TRUE,
1344 // update the value range associated with DST.
1346 void
1347 jt_state::register_equiv (tree dst, tree src, bool update_range)
1349 m_copies->record_const_or_copy (dst, src);
1351 /* If requested, update the value range associated with DST, using
1352 the range from SRC. */
1353 if (m_evrp && update_range)
1355 /* Get new VR we can pass to push_value_range. */
1356 value_range_equiv *new_vr = m_evrp->allocate_value_range_equiv ();
1357 new (new_vr) value_range_equiv ();
1359 /* There are three cases to consider:
1361 First if SRC is an SSA_NAME, then we can copy the value range
1362 from SRC into NEW_VR.
1364 Second if SRC is an INTEGER_CST, then we can just set NEW_VR
1365 to a singleton range. Note that even if SRC is a constant we
1366 need to set a suitable output range so that VR_UNDEFINED
1367 ranges do not leak through.
1369 Otherwise set NEW_VR to varying. This may be overly
1370 conservative. */
1371 if (TREE_CODE (src) == SSA_NAME)
1372 new_vr->deep_copy (m_evrp->get_value_range (src));
1373 else if (TREE_CODE (src) == INTEGER_CST)
1374 new_vr->set (src);
1375 else
1376 new_vr->set_varying (TREE_TYPE (src));
1378 /* This is a temporary range for DST, so push it. */
1379 m_evrp->push_value_range (dst, new_vr);
1383 // Record any ranges calculated in STMT. If TEMPORARY is TRUE, then
1384 // this is a temporary equivalence and should be recorded into the
1385 // unwind table, instead of the global table.
1387 void
1388 jt_state::record_ranges_from_stmt (gimple *stmt, bool temporary)
1390 if (m_evrp)
1391 m_evrp->record_ranges_from_stmt (stmt, temporary);
1394 // Record any equivalences created by traversing E.
1396 void
1397 jt_state::register_equivs_on_edge (edge e)
1399 record_temporary_equivalences (e, m_copies, m_exprs);
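/* Construct a simplifier that answers queries using the range
   information in V.  */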
1402 jump_threader_simplifier::jump_threader_simplifier (vr_values *v)
1404 m_vr_values = v;
1407 // Return the singleton that resolves STMT, if it is possible to
1408 // simplify it.
1410 // STMT may be a dummy statement, not necessarily in the CFG, in which
1411 // case WITHIN_STMT can be used to determine the position in the CFG
1412 // where STMT should be evaluated.
1414 tree
1415 jump_threader_simplifier::simplify (gimple *stmt,
1416 gimple *within_stmt,
1417 basic_block,
1418 jt_state *)
1420 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
1422 simplify_using_ranges simplifier (m_vr_values);
1423 return simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
1424 gimple_cond_lhs (cond_stmt),
1425 gimple_cond_rhs (cond_stmt),
1426 within_stmt);
1428 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
1430 tree op = gimple_switch_index (switch_stmt);
1431 if (TREE_CODE (op) != SSA_NAME)
1432 return NULL_TREE;
1434 const value_range_equiv *vr = m_vr_values->get_value_range (op);
1435 return find_case_label_range (switch_stmt, vr);
1437 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
1439 tree lhs = gimple_assign_lhs (assign_stmt);
1440 if (TREE_CODE (lhs) == SSA_NAME
1441 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1442 || POINTER_TYPE_P (TREE_TYPE (lhs)))
1443 && stmt_interesting_for_vrp (stmt))
1445 edge dummy_e;
1446 tree dummy_tree;
1447 value_range_equiv new_vr;
1448 m_vr_values->extract_range_from_stmt (stmt, &dummy_e, &dummy_tree,
1449 &new_vr);
1450 tree singleton;
1451 if (new_vr.singleton_p (&singleton))
1452 return singleton;
1455 return NULL;