gcc/tree-ssa-threadedge.c
1 /* SSA Jump Threading
2 Copyright (C) 2005-2021 Free Software Foundation, Inc.
3 Contributed by Jeff Law <law@redhat.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "predict.h"
28 #include "ssa.h"
29 #include "fold-const.h"
30 #include "cfgloop.h"
31 #include "gimple-iterator.h"
32 #include "tree-cfg.h"
33 #include "tree-ssa-threadupdate.h"
34 #include "tree-ssa-scopedtables.h"
35 #include "tree-ssa-threadedge.h"
36 #include "gimple-fold.h"
37 #include "cfganal.h"
38 #include "alloc-pool.h"
39 #include "vr-values.h"
40 #include "gimple-ssa-evrp-analyze.h"
41 #include "gimple-range.h"
42 #include "gimple-range-path.h"
44 /* To avoid code explosion due to jump threading, we limit the
45 number of statements we are going to copy. This variable
46 holds the number of statements currently seen that we'll have
47 to copy as part of the jump threading process. */
48 static int stmt_count;
50 /* Array to record value-handles per SSA_NAME. */
51 vec<tree> ssa_name_values;
53 /* Set the value for the SSA name NAME to VALUE. */
55 void
56 set_ssa_name_value (tree name, tree value)
58 if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
59 ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1, true);
60 if (value && TREE_OVERFLOW_P (value))
61 value = drop_tree_overflow (value);
62 ssa_name_values[SSA_NAME_VERSION (name)] = value;
65 jump_threader::jump_threader (jt_simplifier *simplifier, jt_state *state)
67 /* Initialize the per SSA_NAME value-handles array. */
68 gcc_assert (!ssa_name_values.exists ());
69 ssa_name_values.create (num_ssa_names);
71 dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
72 integer_zero_node, NULL, NULL);
74 m_registry = new fwd_jt_path_registry ();
75 m_simplifier = simplifier;
76 m_state = state;
79 jump_threader::~jump_threader (void)
81 ssa_name_values.release ();
82 ggc_free (dummy_cond);
83 delete m_registry;
86 void
87 jump_threader::remove_jump_threads_including (edge_def *e)
89 m_registry->remove_jump_threads_including (e);
92 bool
93 jump_threader::thread_through_all_blocks (bool may_peel_loop_headers)
95 return m_registry->thread_through_all_blocks (may_peel_loop_headers);
98 static inline bool
99 has_phis_p (basic_block bb)
101 return !gsi_end_p (gsi_start_phis (bb));
104 /* Return TRUE for a block with PHIs but no statements. */
106 static bool
107 empty_block_with_phis_p (basic_block bb)
109 return gsi_end_p (gsi_start_nondebug_bb (bb)) && has_phis_p (bb);
112 /* Return TRUE if we may be able to thread an incoming edge into
113 BB to an outgoing edge from BB. Return FALSE otherwise. */
115 static bool
116 potentially_threadable_block (basic_block bb)
118 gimple_stmt_iterator gsi;
120 /* Special case. We can get blocks that are forwarders, but are
121 not optimized away because they forward from outside a loop
122 to the loop header. We want to thread through them as we can
123 sometimes thread to the loop exit, which is obviously profitable.
124 The interesting case here is when the block has PHIs. */
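   /* A hypothetical example of such a forwarder block (block and SSA
      names are made up for illustration):

	 <bb 4>:		;; forwarder from outside the loop
	 # i_1 = PHI <0(2), i_7(3)>
	 goto <bb 5>;		;; loop header

      Threading an incoming edge through bb 4 can sometimes reach the
      loop exit directly, so we do not reject the block merely because
      it has no statements.  */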
125 if (empty_block_with_phis_p (bb))
126 return true;
128 /* If BB has a single successor or a single predecessor, then
129 there is no threading opportunity. */
130 if (single_succ_p (bb) || single_pred_p (bb))
131 return false;
133 /* If BB does not end with a conditional, switch or computed goto,
134 then there is no threading opportunity. */
135 gsi = gsi_last_bb (bb);
136 if (gsi_end_p (gsi)
137 || ! gsi_stmt (gsi)
138 || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
139 && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
140 && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
141 return false;
143 return true;
146 /* Record temporary equivalences created by PHIs at the target of the
147 edge E.
149 If a PHI which prevents threading is encountered, then return FALSE
150 indicating we should not thread this edge, else return TRUE. */
152 bool
153 jump_threader::record_temporary_equivalences_from_phis (edge e)
155 gphi_iterator gsi;
157 /* Each PHI creates a temporary equivalence, record them.
158 These are context sensitive equivalences and will be removed
159 later. */
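      /* A hypothetical illustration: for a PHI in E->dest such as

	   # x_5 = PHI <7(2), y_8(3)>

	 traversing the edge from block 2 lets us temporarily record
	 x_5 == 7, which may later collapse a condition that uses x_5.  */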
160 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
162 gphi *phi = gsi.phi ();
163 tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
164 tree dst = gimple_phi_result (phi);
166 /* If the desired argument is not the same as this PHI's result
167 and it is set by a PHI in E->dest, then we cannot thread
168 through E->dest. */
169 if (src != dst
170 && TREE_CODE (src) == SSA_NAME
171 && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
172 && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
173 return false;
175 /* We consider any non-virtual PHI as a statement since it
176 could result in a constant assignment or copy operation. */
177 if (!virtual_operand_p (dst))
178 stmt_count++;
180 m_state->register_equiv (dst, src, /*update_range=*/true);
182 return true;
185 /* Valueize hook for gimple_fold_stmt_to_constant_1. */
187 static tree
188 threadedge_valueize (tree t)
190 if (TREE_CODE (t) == SSA_NAME)
192 tree tem = SSA_NAME_VALUE (t);
193 if (tem)
194 return tem;
196 return t;
199 /* Try to simplify each statement in E->dest, ultimately leading to
200 a simplification of the COND_EXPR at the end of E->dest.
202 Record unwind information for temporary equivalences onto STACK.
204 Uses M_SIMPLIFIER to further simplify statements using pass specific
205 information.
207 We might consider marking just those statements which ultimately
208 feed the COND_EXPR. It's not clear if the overhead of bookkeeping
209 would be recovered by trying to simplify fewer statements.
211 If we are able to simplify a statement into the form
212 SSA_NAME = (SSA_NAME | gimple invariant), then we can record
213 a context sensitive equivalence which may help us simplify
214 later statements in E->dest. */
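/* A hypothetical illustration of the above: if a PHI traversal recorded
   x_5 == 7, then the statement

     y_6 = x_5 + 1;

   simplifies to the invariant 8, so we record the context sensitive
   equivalence y_6 == 8 and may use it to fold a later test such as
   if (y_6 > 10).  */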
216 gimple *
217 jump_threader::record_temporary_equivalences_from_stmts_at_dest (edge e)
219 gimple *stmt = NULL;
220 gimple_stmt_iterator gsi;
221 int max_stmt_count;
223 max_stmt_count = param_max_jump_thread_duplication_stmts;
225 /* Walk through each statement in the block recording equivalences
226 we discover. Note any equivalences we discover are context
227 sensitive (ie, are dependent on traversing E) and must be unwound
228 when we're finished processing E. */
229 for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
231 stmt = gsi_stmt (gsi);
233 /* Ignore empty statements and labels. */
234 if (gimple_code (stmt) == GIMPLE_NOP
235 || gimple_code (stmt) == GIMPLE_LABEL
236 || is_gimple_debug (stmt))
237 continue;
239 /* If the statement has volatile operands, then we assume we
240 cannot thread through this block. This is overly
241 conservative in some ways. */
242 if (gimple_code (stmt) == GIMPLE_ASM
243 && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
244 return NULL;
246 /* If the statement is a unique builtin, we cannot thread
247 through here. */
248 if (gimple_code (stmt) == GIMPLE_CALL
249 && gimple_call_internal_p (stmt)
250 && gimple_call_internal_unique_p (stmt))
251 return NULL;
253 /* We cannot thread through __builtin_constant_p, because an
254 expression that is constant on two threading paths may become
255 non-constant (i.e.: phi) when they merge. */
256 if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P))
257 return NULL;
259 /* If duplicating this block is going to cause too much code
260 expansion, then do not thread through this block. */
261 stmt_count++;
262 if (stmt_count > max_stmt_count)
264 /* If any of the stmts in the PATH's dests are going to be
265 killed due to threading, grow the max count
266 accordingly. */
267 if (max_stmt_count
268 == param_max_jump_thread_duplication_stmts)
270 max_stmt_count += estimate_threading_killed_stmts (e->dest);
271 if (dump_file)
272 fprintf (dump_file, "threading bb %i up to %i stmts\n",
273 e->dest->index, max_stmt_count);
275 /* If we're still past the limit, we're done. */
276 if (stmt_count > max_stmt_count)
277 return NULL;
280 m_state->record_ranges_from_stmt (stmt, true);
282 /* If this is not a statement that sets an SSA_NAME to a new
283 value, then do not try to simplify this statement as it will
284 not simplify in any way that is helpful for jump threading. */
285 if ((gimple_code (stmt) != GIMPLE_ASSIGN
286 || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
287 && (gimple_code (stmt) != GIMPLE_CALL
288 || gimple_call_lhs (stmt) == NULL_TREE
289 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
290 continue;
292 /* The result of __builtin_object_size depends on all the arguments
293 of a phi node. Temporarily using only one edge produces invalid
294 results. For example
296 if (x < 6)
297 goto l;
298 else
299 goto l;
301 l:
302 r = PHI <&w[2].a[1](2), &a.a[6](3)>
303 __builtin_object_size (r, 0)
305 The result of __builtin_object_size is defined to be the maximum of
306 remaining bytes. If we use only one edge on the phi, the result will
307 change to be the remaining bytes for the corresponding phi argument.
309 Similarly for __builtin_constant_p:
311 r = PHI <1(2), 2(3)>
312 __builtin_constant_p (r)
314 Both PHI arguments are constant, but x ? 1 : 2 is still not
315 constant. */
317 if (is_gimple_call (stmt))
319 tree fndecl = gimple_call_fndecl (stmt);
320 if (fndecl
321 && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
322 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
323 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
324 continue;
327 m_state->register_equivs_stmt (stmt, e->src, m_simplifier);
329 return stmt;
332 /* Simplify the control statement at the end of the block E->dest.
334 Use SIMPLIFY (a pointer to a callback function) to further simplify
335 a condition using pass specific information.
337 Return the simplified condition or NULL if simplification could
338 not be performed. When simplifying a GIMPLE_SWITCH, we may return
339 the CASE_LABEL_EXPR that will be taken. */
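/* For instance (hypothetical names): if E->dest ends with

     if (x_5 > 10)

   and the equivalences recorded along E give x_5 the value 7, the
   condition folds to boolean_false_node.  For a GIMPLE_SWITCH on x_5 we
   would instead return the CASE_LABEL_EXPR of the case label that
   covers 7.  */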
341 tree
342 jump_threader::simplify_control_stmt_condition (edge e, gimple *stmt)
344 tree cond, cached_lhs;
345 enum gimple_code code = gimple_code (stmt);
347 /* For comparisons, we have to update both operands, then try
348 to simplify the comparison. */
349 if (code == GIMPLE_COND)
351 tree op0, op1;
352 enum tree_code cond_code;
354 op0 = gimple_cond_lhs (stmt);
355 op1 = gimple_cond_rhs (stmt);
356 cond_code = gimple_cond_code (stmt);
358 /* Get the current value of both operands. */
359 if (TREE_CODE (op0) == SSA_NAME)
361 for (int i = 0; i < 2; i++)
363 if (TREE_CODE (op0) == SSA_NAME
364 && SSA_NAME_VALUE (op0))
365 op0 = SSA_NAME_VALUE (op0);
366 else
367 break;
371 if (TREE_CODE (op1) == SSA_NAME)
373 for (int i = 0; i < 2; i++)
375 if (TREE_CODE (op1) == SSA_NAME
376 && SSA_NAME_VALUE (op1))
377 op1 = SSA_NAME_VALUE (op1);
378 else
379 break;
383 const unsigned recursion_limit = 4;
385 cached_lhs
386 = simplify_control_stmt_condition_1 (e, stmt, op0, cond_code, op1,
387 recursion_limit);
389 /* If we were testing an integer/pointer against a constant,
390 then we can trace the value of the SSA_NAME. If a value is
391 found, then the condition will collapse to a constant.
393 Return the SSA_NAME we want to trace back rather than the full
394 expression and give the threader a chance to find its value. */
395 if (cached_lhs == NULL)
397 /* Recover the original operands. They may have been simplified
398 using context sensitive equivalences. Those context sensitive
399 equivalences may not be valid on paths. */
400 tree op0 = gimple_cond_lhs (stmt);
401 tree op1 = gimple_cond_rhs (stmt);
403 if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
404 || POINTER_TYPE_P (TREE_TYPE (op0)))
405 && TREE_CODE (op0) == SSA_NAME
406 && TREE_CODE (op1) == INTEGER_CST)
407 return op0;
410 return cached_lhs;
413 if (code == GIMPLE_SWITCH)
414 cond = gimple_switch_index (as_a <gswitch *> (stmt));
415 else if (code == GIMPLE_GOTO)
416 cond = gimple_goto_dest (stmt);
417 else
418 gcc_unreachable ();
420 /* We can have conditionals which just test the state of a variable
421 rather than use a relational operator. These are simpler to handle. */
422 if (TREE_CODE (cond) == SSA_NAME)
424 tree original_lhs = cond;
425 cached_lhs = cond;
427 /* Get the variable's current value from the equivalence chains.
429 It is possible to get loops in the SSA_NAME_VALUE chains
430 (consider threading the backedge of a loop where we have
431 a loop invariant SSA_NAME used in the condition). */
432 if (cached_lhs)
434 for (int i = 0; i < 2; i++)
436 if (TREE_CODE (cached_lhs) == SSA_NAME
437 && SSA_NAME_VALUE (cached_lhs))
438 cached_lhs = SSA_NAME_VALUE (cached_lhs);
439 else
440 break;
444 /* If we haven't simplified to an invariant yet, then use the
445 pass specific callback to try and simplify it further. */
446 if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
448 if (code == GIMPLE_SWITCH)
450 /* Replace the index operand of the GIMPLE_SWITCH with any LHS
451 we found before handing off to VRP. If simplification is
452 possible, the simplified value will be a CASE_LABEL_EXPR of
453 the label that is proven to be taken. */
454 gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
455 gimple_switch_set_index (dummy_switch, cached_lhs);
456 cached_lhs = m_simplifier->simplify (dummy_switch, stmt, e->src,
457 m_state);
458 ggc_free (dummy_switch);
460 else
461 cached_lhs = m_simplifier->simplify (stmt, stmt, e->src, m_state);
464 /* We couldn't find an invariant. But, callers of this
465 function may be able to do something useful with the
466 unmodified destination. */
467 if (!cached_lhs)
468 cached_lhs = original_lhs;
470 else
471 cached_lhs = NULL;
473 return cached_lhs;
476 /* Recursive helper for simplify_control_stmt_condition. */
478 tree
479 jump_threader::simplify_control_stmt_condition_1
480 (edge e,
481 gimple *stmt,
482 tree op0,
483 enum tree_code cond_code,
484 tree op1,
485 unsigned limit)
487 if (limit == 0)
488 return NULL_TREE;
490 /* We may need to canonicalize the comparison. For
491 example, op0 might be a constant while op1 is an
492 SSA_NAME. Failure to canonicalize will cause us to
493 miss threading opportunities. */
494 if (tree_swap_operands_p (op0, op1))
496 cond_code = swap_tree_comparison (cond_code);
497 std::swap (op0, op1);
500 /* If the condition has the form (A & B) CMP 0 or (A | B) CMP 0 then
501 recurse into the LHS to see if there is a dominating ASSERT_EXPR
502 of A or of B that makes this condition always true or always false
503 along the edge E. */
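      /* A hypothetical illustration:

	   _1 = a_2 != 0;
	   _3 = b_4 != 0;
	   _5 = _1 & _3;
	   if (_5 == 0)

	 If recursing on _1 proves it is zero along E (i.e. a_2 == 0),
	 then (_1 & _3) == 0 is necessarily true along E regardless of
	 the value of _3.  */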
504 if ((cond_code == EQ_EXPR || cond_code == NE_EXPR)
505 && TREE_CODE (op0) == SSA_NAME
506 && integer_zerop (op1))
508 gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
509 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
511 else if (gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR
512 || gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
514 enum tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
515 const tree rhs1 = gimple_assign_rhs1 (def_stmt);
516 const tree rhs2 = gimple_assign_rhs2 (def_stmt);
518 /* Is A != 0 ? */
519 const tree res1
520 = simplify_control_stmt_condition_1 (e, def_stmt,
521 rhs1, NE_EXPR, op1,
522 limit - 1);
523 if (res1 == NULL_TREE)
525 else if (rhs_code == BIT_AND_EXPR && integer_zerop (res1))
527 /* If A == 0 then (A & B) != 0 is always false. */
528 if (cond_code == NE_EXPR)
529 return boolean_false_node;
530 /* If A == 0 then (A & B) == 0 is always true. */
531 if (cond_code == EQ_EXPR)
532 return boolean_true_node;
534 else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res1))
536 /* If A != 0 then (A | B) != 0 is always true. */
537 if (cond_code == NE_EXPR)
538 return boolean_true_node;
539 /* If A != 0 then (A | B) == 0 is always false. */
540 if (cond_code == EQ_EXPR)
541 return boolean_false_node;
544 /* Is B != 0 ? */
545 const tree res2
546 = simplify_control_stmt_condition_1 (e, def_stmt,
547 rhs2, NE_EXPR, op1,
548 limit - 1);
549 if (res2 == NULL_TREE)
551 else if (rhs_code == BIT_AND_EXPR && integer_zerop (res2))
553 /* If B == 0 then (A & B) != 0 is always false. */
554 if (cond_code == NE_EXPR)
555 return boolean_false_node;
556 /* If B == 0 then (A & B) == 0 is always true. */
557 if (cond_code == EQ_EXPR)
558 return boolean_true_node;
560 else if (rhs_code == BIT_IOR_EXPR && integer_nonzerop (res2))
562 /* If B != 0 then (A | B) != 0 is always true. */
563 if (cond_code == NE_EXPR)
564 return boolean_true_node;
565 /* If B != 0 then (A | B) == 0 is always false. */
566 if (cond_code == EQ_EXPR)
567 return boolean_false_node;
570 if (res1 != NULL_TREE && res2 != NULL_TREE)
572 if (rhs_code == BIT_AND_EXPR
573 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
574 && integer_nonzerop (res1)
575 && integer_nonzerop (res2))
577 /* If A != 0 and B != 0 then (bool)(A & B) != 0 is true. */
578 if (cond_code == NE_EXPR)
579 return boolean_true_node;
580 /* If A != 0 and B != 0 then (bool)(A & B) == 0 is false. */
581 if (cond_code == EQ_EXPR)
582 return boolean_false_node;
585 if (rhs_code == BIT_IOR_EXPR
586 && integer_zerop (res1)
587 && integer_zerop (res2))
589 /* If A == 0 and B == 0 then (A | B) != 0 is false. */
590 if (cond_code == NE_EXPR)
591 return boolean_false_node;
592 /* If A == 0 and B == 0 then (A | B) == 0 is true. */
593 if (cond_code == EQ_EXPR)
594 return boolean_true_node;
598 /* Handle (A CMP B) CMP 0. */
599 else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
600 == tcc_comparison)
602 tree rhs1 = gimple_assign_rhs1 (def_stmt);
603 tree rhs2 = gimple_assign_rhs2 (def_stmt);
605 tree_code new_cond = gimple_assign_rhs_code (def_stmt);
606 if (cond_code == EQ_EXPR)
607 new_cond = invert_tree_comparison (new_cond, false);
609 tree res
610 = simplify_control_stmt_condition_1 (e, def_stmt,
611 rhs1, new_cond, rhs2,
612 limit - 1);
613 if (res != NULL_TREE && is_gimple_min_invariant (res))
614 return res;
618 gimple_cond_set_code (dummy_cond, cond_code);
619 gimple_cond_set_lhs (dummy_cond, op0);
620 gimple_cond_set_rhs (dummy_cond, op1);
622 /* We absolutely do not care about any type conversions;
623 we only care about a zero/nonzero value. */
624 fold_defer_overflow_warnings ();
626 tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
627 if (res)
628 while (CONVERT_EXPR_P (res))
629 res = TREE_OPERAND (res, 0);
631 fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
632 stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
634 /* If we have not simplified the condition down to an invariant,
635 then use the pass specific callback to simplify the condition. */
636 if (!res
637 || !is_gimple_min_invariant (res))
638 res = m_simplifier->simplify (dummy_cond, stmt, e->src, m_state);
640 return res;
643 /* Copy debug stmts from DEST's chain of single predecessors up to
644 SRC, so that we don't lose the bindings as PHI nodes are introduced
645 when DEST gains new predecessors. */
646 void
647 propagate_threaded_block_debug_into (basic_block dest, basic_block src)
649 if (!MAY_HAVE_DEBUG_BIND_STMTS)
650 return;
652 if (!single_pred_p (dest))
653 return;
655 gcc_checking_assert (dest != src);
657 gimple_stmt_iterator gsi = gsi_after_labels (dest);
658 int i = 0;
659 const int alloc_count = 16; // ?? Should this be a PARAM?
661 /* Estimate the number of debug vars overridden in the beginning of
662 DEST, to tell how many we're going to need to begin with. */
663 for (gimple_stmt_iterator si = gsi;
664 i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
666 gimple *stmt = gsi_stmt (si);
667 if (!is_gimple_debug (stmt))
668 break;
669 if (gimple_debug_nonbind_marker_p (stmt))
670 continue;
671 i++;
674 auto_vec<tree, alloc_count> fewvars;
675 hash_set<tree> *vars = NULL;
677 /* If we're already starting with 3/4 of alloc_count, go for a
678 hash_set, otherwise start with an unordered stack-allocated
679 VEC. */
680 if (i * 4 > alloc_count * 3)
681 vars = new hash_set<tree>;
683 /* Now go through the initial debug stmts in DEST again, this time
684 actually inserting in VARS or FEWVARS. Don't bother checking for
685 duplicates in FEWVARS. */
686 for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
688 gimple *stmt = gsi_stmt (si);
689 if (!is_gimple_debug (stmt))
690 break;
692 tree var;
694 if (gimple_debug_bind_p (stmt))
695 var = gimple_debug_bind_get_var (stmt);
696 else if (gimple_debug_source_bind_p (stmt))
697 var = gimple_debug_source_bind_get_var (stmt);
698 else if (gimple_debug_nonbind_marker_p (stmt))
699 continue;
700 else
701 gcc_unreachable ();
703 if (vars)
704 vars->add (var);
705 else
706 fewvars.quick_push (var);
709 basic_block bb = dest;
711 do
713 bb = single_pred (bb);
714 for (gimple_stmt_iterator si = gsi_last_bb (bb);
715 !gsi_end_p (si); gsi_prev (&si))
717 gimple *stmt = gsi_stmt (si);
718 if (!is_gimple_debug (stmt))
719 continue;
721 tree var;
723 if (gimple_debug_bind_p (stmt))
724 var = gimple_debug_bind_get_var (stmt);
725 else if (gimple_debug_source_bind_p (stmt))
726 var = gimple_debug_source_bind_get_var (stmt);
727 else if (gimple_debug_nonbind_marker_p (stmt))
728 continue;
729 else
730 gcc_unreachable ();
732 /* Discard debug bind overlaps. Unlike stmts from src,
733 copied into a new block that will precede BB, debug bind
734 stmts in bypassed BBs may actually be discarded if
735 they're overwritten by subsequent debug bind stmts. We
736 want to copy binds for all modified variables, so that we
737 retain a bind to the shared def if there is one, or to a
738 newly introduced PHI node if there is one. Our bind will
739 end up reset if the value is dead, but that implies the
740 variable couldn't have survived, so it's fine. We are
741 not actually running the code that performed the binds at
742 this point, we're just adding binds so that they survive
743 the new confluence, so markers should not be copied. */
744 if (vars && vars->add (var))
745 continue;
746 else if (!vars)
748 int i = fewvars.length ();
749 while (i--)
750 if (fewvars[i] == var)
751 break;
752 if (i >= 0)
753 continue;
754 else if (fewvars.length () < (unsigned) alloc_count)
755 fewvars.quick_push (var);
756 else
758 vars = new hash_set<tree>;
759 for (i = 0; i < alloc_count; i++)
760 vars->add (fewvars[i]);
761 fewvars.release ();
762 vars->add (var);
766 stmt = gimple_copy (stmt);
767 /* ??? Should we drop the location of the copy to denote
768 they're artificial bindings? */
769 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
772 while (bb != src && single_pred_p (bb));
774 if (vars)
775 delete vars;
776 else if (fewvars.exists ())
777 fewvars.release ();
780 /* See if TAKEN_EDGE->dest is a threadable block with no side effects (i.e., it
781 need not be duplicated as part of the CFG/SSA updating process).
783 If it is threadable, add it to PATH and VISITED and recurse, ultimately
784 returning TRUE from the toplevel call. Otherwise do nothing and
785 return false. */
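/* A hypothetical illustration: if the taken edge leads to

     <bb 6>:			;; no PHIs, no side effects
     if (x_5 != 0)

   and x_5 is known to be nonzero along the path, bb 6 need not be
   copied; we simply extend the path through its true edge and keep
   looking for further blocks to thread through.  */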
787 bool
788 jump_threader::thread_around_empty_blocks (vec<jump_thread_edge *> *path,
789 edge taken_edge,
790 bitmap visited)
792 basic_block bb = taken_edge->dest;
793 gimple_stmt_iterator gsi;
794 gimple *stmt;
795 tree cond;
797 /* The key property of these blocks is that they need not be duplicated
798 when threading. Thus they cannot have visible side effects such
799 as PHI nodes. */
800 if (has_phis_p (bb))
801 return false;
803 /* Skip over DEBUG statements at the start of the block. */
804 gsi = gsi_start_nondebug_bb (bb);
806 /* If the block has no statements, but does have a single successor, then
807 it's just a forwarding block and we can thread through it trivially.
809 However, note that just threading through empty blocks with single
810 successors is not inherently profitable. For the jump thread to
811 be profitable, we must avoid a runtime conditional.
813 By taking the return value from the recursive call, we get the
814 desired effect of returning TRUE when we found a profitable jump
815 threading opportunity and FALSE otherwise.
817 This is particularly important when this routine is called after
818 processing a joiner block. Returning TRUE too aggressively in
819 that case results in pointless duplication of the joiner block. */
820 if (gsi_end_p (gsi))
822 if (single_succ_p (bb))
824 taken_edge = single_succ_edge (bb);
826 if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
827 return false;
829 if (!bitmap_bit_p (visited, taken_edge->dest->index))
831 m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
832 m_state->append_path (taken_edge->dest);
833 bitmap_set_bit (visited, taken_edge->dest->index);
834 return thread_around_empty_blocks (path, taken_edge, visited);
838 /* We have a block with no statements, but multiple successors? */
839 return false;
842 /* The only real statements this block can have are a control
843 flow altering statement. Anything else stops the thread. */
844 stmt = gsi_stmt (gsi);
845 if (gimple_code (stmt) != GIMPLE_COND
846 && gimple_code (stmt) != GIMPLE_GOTO
847 && gimple_code (stmt) != GIMPLE_SWITCH)
848 return false;
850 /* Extract and simplify the condition. */
851 cond = simplify_control_stmt_condition (taken_edge, stmt);
853 /* If the condition can be statically computed and we have not already
854 visited the destination edge, then add the taken edge to our thread
855 path. */
856 if (cond != NULL_TREE
857 && (is_gimple_min_invariant (cond)
858 || TREE_CODE (cond) == CASE_LABEL_EXPR))
860 if (TREE_CODE (cond) == CASE_LABEL_EXPR)
861 taken_edge = find_edge (bb, label_to_block (cfun, CASE_LABEL (cond)));
862 else
863 taken_edge = find_taken_edge (bb, cond);
865 if (!taken_edge
866 || (taken_edge->flags & EDGE_DFS_BACK) != 0)
867 return false;
869 if (bitmap_bit_p (visited, taken_edge->dest->index))
870 return false;
871 bitmap_set_bit (visited, taken_edge->dest->index);
873 m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
874 m_state->append_path (taken_edge->dest);
876 thread_around_empty_blocks (path, taken_edge, visited);
877 return true;
880 return false;
883 /* We are exiting E->src, see if E->dest ends with a conditional
884 jump which has a known value when reached via E.
886 E->dest can have arbitrary side effects which, if threading is
887 successful, will be maintained.
889 Special care is necessary if E is a back edge in the CFG as we
890 may have already recorded equivalences for E->dest into our
891 various tables, including the result of the conditional at
892 the end of E->dest. Threading opportunities are severely
893 limited in that case to avoid short-circuiting the loop
894 incorrectly.
896 Positive return value is success. Zero return value is failure, but
897 the block can still be duplicated as a joiner in a jump thread path,
898 negative indicates the block should not be duplicated and thus is not
899 suitable for a joiner in a jump threading path. */
901 int
902 jump_threader::thread_through_normal_block (vec<jump_thread_edge *> *path,
903 edge e, bitmap visited)
905 m_state->register_equivs_edge (e);
907 /* PHIs create temporary equivalences.
908 Note that if we found a PHI that made the block non-threadable, then
909 we need to bubble that up to our caller in the same manner we do
910 when we prematurely stop processing statements below. */
911 if (!record_temporary_equivalences_from_phis (e))
912 return -1;
914 /* Now walk each statement recording any context sensitive
915 temporary equivalences we can detect. */
916 gimple *stmt = record_temporary_equivalences_from_stmts_at_dest (e);
918 /* There are two reasons STMT might be null, and distinguishing
919 between them is important.
921 First the block may not have had any statements. For example, it
922 might have some PHIs and unconditionally transfer control elsewhere.
923 Such blocks are suitable for jump threading, particularly as a
924 joiner block.
926 The second reason would be if we did not process all the statements
927 in the block (because there were too many to make duplicating the
928 block profitable).  If we did not look at all the statements, then
929 we may not have invalidated everything needing invalidation. Thus
930 we must signal to our caller that this block is not suitable for
931 use as a joiner in a threading path. */
932 if (!stmt)
934 /* First case. The statement simply doesn't have any instructions, but
935 does have PHIs. */
936 if (empty_block_with_phis_p (e->dest))
937 return 0;
939 /* Second case. */
940 return -1;
943 /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
944 will be taken. */
945 if (gimple_code (stmt) == GIMPLE_COND
946 || gimple_code (stmt) == GIMPLE_GOTO
947 || gimple_code (stmt) == GIMPLE_SWITCH)
949 tree cond;
951 /* Extract and simplify the condition. */
952 cond = simplify_control_stmt_condition (e, stmt);
954 if (!cond)
955 return 0;
957 if (is_gimple_min_invariant (cond)
958 || TREE_CODE (cond) == CASE_LABEL_EXPR)
960 edge taken_edge;
961 if (TREE_CODE (cond) == CASE_LABEL_EXPR)
962 taken_edge = find_edge (e->dest,
963 label_to_block (cfun, CASE_LABEL (cond)));
964 else
965 taken_edge = find_taken_edge (e->dest, cond);
967 basic_block dest = (taken_edge ? taken_edge->dest : NULL);
969 /* DEST could be NULL for a computed jump to an absolute
970 address. */
971 if (dest == NULL
972 || dest == e->dest
973 || (taken_edge->flags & EDGE_DFS_BACK) != 0
974 || bitmap_bit_p (visited, dest->index))
975 return 0;
977 /* Only push the EDGE_START_JUMP_THREAD marker if this is the
978 first edge on the path. */
979 if (path->length () == 0)
980 m_registry->push_edge (path, e, EDGE_START_JUMP_THREAD);
982 m_registry->push_edge (path, taken_edge, EDGE_COPY_SRC_BLOCK);
983 m_state->append_path (taken_edge->dest);
985 /* See if we can thread through DEST as well, this helps capture
986 secondary effects of threading without having to re-run DOM or
987 VRP.
989 We don't want to thread back to a block we have already
990 visited. This may be overly conservative. */
991 bitmap_set_bit (visited, dest->index);
992 bitmap_set_bit (visited, e->dest->index);
993 thread_around_empty_blocks (path, taken_edge, visited);
994 return 1;
997 return 0;
1000 /* There are basic blocks that look like:
1001 <P0>
1002 p0 = a CMP b ; or p0 = (INT) (a CMP b)
1003 goto <X>;
1005 <P1>
1006 p1 = c CMP d
1007 goto <X>;
1009 <X>
1010 # phi = PHI <p0 (P0), p1 (P1)>
1011 if (phi != 0) goto <Y>; else goto <Z>;
1013 Then, edge (P0,X) or (P1,X) could be marked as EDGE_START_JUMP_THREAD,
1014 and edges (X,Y) and (X,Z) as EDGE_COPY_SRC_JOINER_BLOCK.
1016 Return true if E is (P0,X) or (P1,X). */
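/* A source-level shape that can produce this pattern (hypothetical code):

     int phi = c ? (a < b) : (d < e);
     if (phi)
       ...

   Each arm computes a comparison, the results merge in a PHI at X, and
   X does nothing but test that PHI.  */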
1018 bool
1019 edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
1022 /* See if there is only one stmt, which must be a gcond. */
1022 gcond *gs;
1023 if (!(gs = safe_dyn_cast<gcond *> (last_and_only_stmt (e->dest))))
1024 return false;
1026 /* See if gcond's cond is "(phi !=/== 0/1)" in the basic block. */
1027 tree cond = gimple_cond_lhs (gs);
1028 enum tree_code code = gimple_cond_code (gs);
1029 tree rhs = gimple_cond_rhs (gs);
1030 if (TREE_CODE (cond) != SSA_NAME
1031 || (code != NE_EXPR && code != EQ_EXPR)
1032 || (!integer_onep (rhs) && !integer_zerop (rhs)))
1033 return false;
1034 gphi *phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (cond));
1035 if (phi == NULL || gimple_bb (phi) != e->dest)
1036 return false;
1038 /* Check if phi's incoming value is CMP. */
1039 gassign *def;
1040 tree value = PHI_ARG_DEF_FROM_EDGE (phi, e);
1041 if (TREE_CODE (value) != SSA_NAME
1042 || !has_single_use (value)
1043 || !(def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (value))))
1044 return false;
1046 /* Or if it is (INT) (a CMP b). */
1047 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
1049 value = gimple_assign_rhs1 (def);
1050 if (TREE_CODE (value) != SSA_NAME
1051 || !has_single_use (value)
1052 || !(def = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (value))))
1053 return false;
1056 if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
1057 return false;
1059 return true;
1062 /* We are exiting E->src, see if E->dest ends with a conditional jump
1063 which has a known value when reached via E. If so, thread the
1064 edge. */
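/* A hypothetical illustration: if E->dest is

     <bb 5>:
     if (x_5 > 3)

   and traversing E establishes x_5 == 7, the condition is known to be
   true, so the path E -> bb 5 -> true arm is registered as a jump
   thread and bb 5 is duplicated for this path during CFG update.  */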
1066 void
1067 jump_threader::thread_across_edge (edge e)
1069 auto_bitmap visited;
1071 m_state->push (e);
1073 stmt_count = 0;
1075 vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
1076 bitmap_set_bit (visited, e->src->index);
1077 bitmap_set_bit (visited, e->dest->index);
1079 int threaded = 0;
1080 if ((e->flags & EDGE_DFS_BACK) == 0)
1081 threaded = thread_through_normal_block (path, e, visited);
1083 if (threaded > 0)
1085 propagate_threaded_block_debug_into (path->last ()->e->dest,
1086 e->dest);
1087 m_registry->register_jump_thread (path);
1088 m_state->pop ();
1089 return;
1092 gcc_checking_assert (path->length () == 0);
1093 path->release ();
1095 if (threaded < 0)
1097 /* The target block was deemed too big to duplicate. Just quit
1098 now rather than trying to use the block as a joiner in a jump
1099 threading path.
1101 This prevents unnecessary code growth, but more importantly if we
1102 do not look at all the statements in the block, then we may have
1103 missed some invalidations if we had traversed a backedge! */
1104 m_state->pop ();
1105 return;
1108 /* We were unable to determine what out edge from E->dest is taken. However,
1109 we might still be able to thread through successors of E->dest. This
1110 often occurs when E->dest is a joiner block which then fans back out
1111 based on redundant tests.
1113 If so, we'll copy E->dest and redirect the appropriate predecessor to
1114 the copy. Within the copy of E->dest, we'll thread one or more edges
1115 to points deeper in the CFG.
1117 This is a stopgap until we have a more structured approach to path
1118 isolation. */
1120 edge taken_edge;
1121 edge_iterator ei;
1122 bool found;
1124 /* If E->dest has abnormal outgoing edges, then there's no guarantee
1125 we can safely redirect any of the edges. Just punt those cases. */
1126 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1127 if (taken_edge->flags & EDGE_COMPLEX)
1129 m_state->pop ();
1130 return;
1133 /* Look at each successor of E->dest to see if we can thread through it. */
1134 FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
1136 if ((e->flags & EDGE_DFS_BACK) != 0
1137 || (taken_edge->flags & EDGE_DFS_BACK) != 0)
1138 continue;
1140 m_state->push (taken_edge);
1142 /* Avoid threading to any block we have already visited. */
1143 bitmap_clear (visited);
1144 bitmap_set_bit (visited, e->src->index);
1145 bitmap_set_bit (visited, e->dest->index);
1146 bitmap_set_bit (visited, taken_edge->dest->index);
1148 vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
1149 m_registry->push_edge (path, e, EDGE_START_JUMP_THREAD);
1150 m_registry->push_edge (path, taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);
1152 found = thread_around_empty_blocks (path, taken_edge, visited);
1154 if (!found)
1155 found = thread_through_normal_block (path,
1156 path->last ()->e, visited) > 0;
1158 /* If we were able to thread through a successor of E->dest, then
1159 record the jump threading opportunity. */
1160 if (found
1161 || edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (e))
1163 if (taken_edge->dest != path->last ()->e->dest)
1164 propagate_threaded_block_debug_into (path->last ()->e->dest,
1165 taken_edge->dest);
1166 m_registry->register_jump_thread (path);
1168 else
1169 path->release ();
1171 m_state->pop ();
1175 m_state->pop ();
1178 /* Return TRUE if BB has a single successor to a block with multiple
1179 incoming and outgoing edges. */
1181 bool
1182 single_succ_to_potentially_threadable_block (basic_block bb)
1184 int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
1185 return (single_succ_p (bb)
1186 && (single_succ_edge (bb)->flags & flags) == 0
1187 && potentially_threadable_block (single_succ (bb)));
1190 /* Examine the outgoing edges from BB and conditionally
1191 try to thread them. */
1193 void
1194 jump_threader::thread_outgoing_edges (basic_block bb)
1196 int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
1197 gimple *last;
1199 if (!flag_thread_jumps)
1200 return;
1202 /* If we have an outgoing edge to a block with multiple incoming and
1203 outgoing edges, then we may be able to thread the edge, i.e., we
1204 may be able to statically determine which of the outgoing edges
1205 will be traversed when the incoming edge from BB is traversed. */
1206 if (single_succ_to_potentially_threadable_block (bb))
1207 thread_across_edge (single_succ_edge (bb));
1208 else if ((last = last_stmt (bb))
1209 && gimple_code (last) == GIMPLE_COND
1210 && EDGE_COUNT (bb->succs) == 2
1211 && (EDGE_SUCC (bb, 0)->flags & flags) == 0
1212 && (EDGE_SUCC (bb, 1)->flags & flags) == 0)
1214 edge true_edge, false_edge;
1216 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1218 /* Only try to thread the edge if it reaches a target block with
1219 more than one predecessor and more than one successor. */
1220 if (potentially_threadable_block (true_edge->dest))
1221 thread_across_edge (true_edge);
1223 /* Similarly for the ELSE arm. */
1224 if (potentially_threadable_block (false_edge->dest))
1225 thread_across_edge (false_edge);
1229 // Marker to keep track of the start of the current path.
1230 const basic_block jt_state::BB_MARKER = (basic_block) -1;
1232 // Record that E is being crossed.
1234 void
1235 jt_state::push (edge e)
1237 m_blocks.safe_push (BB_MARKER);
1238 if (m_blocks.length () == 1)
1239 m_blocks.safe_push (e->src);
1240 m_blocks.safe_push (e->dest);
1243 // Pop to the last pushed state.
1245 void
1246 jt_state::pop ()
1248 if (!m_blocks.is_empty ())
1250 while (m_blocks.last () != BB_MARKER)
1251 m_blocks.pop ();
1252 // Pop marker.
1253 m_blocks.pop ();
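// For illustration (hypothetical edges): after push (e1) followed by
// push (e2), m_blocks holds
//   { BB_MARKER, e1->src, e1->dest, BB_MARKER, e2->dest }
// and a single pop () discards entries back to and including the last
// BB_MARKER, leaving { BB_MARKER, e1->src, e1->dest }.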
1257 // Add BB to the list of blocks seen.
1259 void
1260 jt_state::append_path (basic_block bb)
1262 gcc_checking_assert (!m_blocks.is_empty ());
1263 m_blocks.safe_push (bb);
1266 void
1267 jt_state::dump (FILE *out)
1269 if (!m_blocks.is_empty ())
1271 auto_vec<basic_block> path;
1272 get_path (path);
1273 dump_ranger (out, path);
1277 void
1278 jt_state::debug ()
1280 push_dump_file save (stderr, TDF_DETAILS);
1281 dump (stderr);
1284 // Convert the current path in jt_state into a path suitable for the
1285 // path solver. Return the resulting path in PATH.
1287 void
1288 jt_state::get_path (vec<basic_block> &path)
1290 path.truncate (0);
1292 for (int i = (int) m_blocks.length () - 1; i >= 0; --i)
1294 basic_block bb = m_blocks[i];
1296 if (bb != BB_MARKER)
1297 path.safe_push (bb);
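// For illustration, with m_blocks = { BB_MARKER, bb2, bb3, BB_MARKER, bb5 }
// (hypothetical blocks), get_path produces PATH = { bb5, bb3, bb2 }:
// markers are dropped and the most recently visited block comes first,
// which is the order the path solver expects.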
1301 // Record an equivalence from DST to SRC. If UPDATE_RANGE is TRUE,
1302 // update the value range associated with DST.
1304 void
1305 jt_state::register_equiv (tree dest ATTRIBUTE_UNUSED,
1306 tree src ATTRIBUTE_UNUSED,
1307 bool update_range ATTRIBUTE_UNUSED)
1311 // Record any ranges calculated in STMT. If TEMPORARY is TRUE, then
1312 // this is a temporary equivalence and should be recorded into the
1313 // unwind table, instead of the global table.
1315 void
1316 jt_state::record_ranges_from_stmt (gimple *,
1317 bool temporary ATTRIBUTE_UNUSED)
1321 // Record any equivalences created by traversing E.
1323 void
1324 jt_state::register_equivs_edge (edge)
1328 void
1329 jt_state::register_equivs_stmt (gimple *stmt, basic_block bb,
1330 jt_simplifier *simplifier)
1332 /* At this point we have a statement which assigns an RHS to an
1333 SSA_VAR on the LHS. We want to try and simplify this statement
1334 to expose more context sensitive equivalences which in turn may
1335 allow us to simplify the condition at the end of the loop.
1337 Handle simple copy operations as well as implied copies from
1338 ASSERT_EXPRs. */
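  /* For example (hypothetical SSA names): the copy

       y_2 = x_1;

     lets us record y_2 == x_1, and

       y_2 = ASSERT_EXPR <x_1, x_1 > 0>;

     likewise records y_2 == x_1.  */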
1339 tree cached_lhs = NULL;
1340 if (gimple_assign_single_p (stmt)
1341 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
1342 cached_lhs = gimple_assign_rhs1 (stmt);
1343 else if (gimple_assign_single_p (stmt)
1344 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
1345 cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
1346 else
1348 /* A statement that is not a trivial copy or ASSERT_EXPR.
1349 Try to fold the new expression. Inserting the
1350 expression into the hash table is unlikely to help. */
1351 /* ??? The DOM callback below can be changed to setting
1352 the mprts_hook around the call to thread_across_edge,
1353 avoiding the use substitution. The VRP hook should be
1354 changed to properly valueize operands itself using
1355 SSA_NAME_VALUE in addition to its own lattice. */
1356 cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
1357 threadedge_valueize);
1358 if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES) != 0
1359 && (!cached_lhs
1360 || (TREE_CODE (cached_lhs) != SSA_NAME
1361 && !is_gimple_min_invariant (cached_lhs))))
1363 /* We're going to temporarily copy propagate the operands
1364 and see if that allows us to simplify this statement. */
1365 tree *copy;
1366 ssa_op_iter iter;
1367 use_operand_p use_p;
1368 unsigned int num, i = 0;
1370 num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
1371 copy = XALLOCAVEC (tree, num);
1373 /* Make a copy of the uses & vuses into USES_COPY, then cprop into
1374 the operands. */
1375 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
1377 tree tmp = NULL;
1378 tree use = USE_FROM_PTR (use_p);
1380 copy[i++] = use;
1381 if (TREE_CODE (use) == SSA_NAME)
1382 tmp = SSA_NAME_VALUE (use);
1383 if (tmp)
1384 SET_USE (use_p, tmp);
1387 cached_lhs = simplifier->simplify (stmt, stmt, bb, this);
1389 /* Restore the statement's original uses/defs. */
1390 i = 0;
1391 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
1392 SET_USE (use_p, copy[i++]);
1396 /* Record the context sensitive equivalence if we were able
1397 to simplify this statement. */
1398 if (cached_lhs
1399 && (TREE_CODE (cached_lhs) == SSA_NAME
1400 || is_gimple_min_invariant (cached_lhs)))
1401 register_equiv (gimple_get_lhs (stmt), cached_lhs,
1402 /*update_range=*/false);
1405 // Hybrid threader implementation.
1408 hybrid_jt_simplifier::hybrid_jt_simplifier (gimple_ranger *r,
1409 path_range_query *q)
1411 m_ranger = r;
1412 m_query = q;
1415 tree
1416 hybrid_jt_simplifier::simplify (gimple *stmt, gimple *, basic_block,
1417 jt_state *state)
1419 int_range_max r;
1421 compute_ranges_from_state (stmt, state);
1423 if (gimple_code (stmt) == GIMPLE_COND
1424 || gimple_code (stmt) == GIMPLE_ASSIGN)
1426 tree ret;
1427 if (m_query->range_of_stmt (r, stmt) && r.singleton_p (&ret))
1428 return ret;
1430 else if (gimple_code (stmt) == GIMPLE_SWITCH)
1432 gswitch *switch_stmt = dyn_cast <gswitch *> (stmt);
1433 tree index = gimple_switch_index (switch_stmt);
1434 if (m_query->range_of_expr (r, index, stmt))
1435 return find_case_label_range (switch_stmt, &r);
1437 return NULL;
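// For illustration (hypothetical names): if the ranges computed along the
// path give x_5 the singleton range [7, 7], then for a final conditional
// "if (x_5 > 3)" range_of_stmt yields the singleton boolean true, which is
// returned to the threader as the simplified condition.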
1440 // Use STATE to generate the list of imports needed for the solver,
1441 // and calculate the ranges along the path.
1443 void
1444 hybrid_jt_simplifier::compute_ranges_from_state (gimple *stmt, jt_state *state)
1446 auto_bitmap imports;
1447 gori_compute &gori = m_ranger->gori ();
1449 state->get_path (m_path);
1451 // Start with the imports to the final conditional.
1452 bitmap_copy (imports, gori.imports (m_path[0]));
1454 // Add any other interesting operands we may have missed.
1455 if (gimple_bb (stmt) != m_path[0])
1457 for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
1459 tree op = gimple_op (stmt, i);
1460 if (op
1461 && TREE_CODE (op) == SSA_NAME
1462 && irange::supports_type_p (TREE_TYPE (op)))
1463 bitmap_set_bit (imports, SSA_NAME_VERSION (op));
1466 m_query->compute_ranges (m_path, imports);