1 /* Rtl-level induction variable analysis.
2 Copyright (C) 2004-2019 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This is a simple analysis of induction variables of the loop. The major use
21 is for determining the number of iterations of a loop for loop unrolling,
22 doloop optimization and branch prediction. The iv information is computed
25 Induction variables are analyzed by walking the use-def chains. When
26 a basic induction variable (biv) is found, it is cached in the bivs
27 hash table. When register is proved to be a biv, its description
28 is stored to DF_REF_DATA of the def reference.
30 The analysis works always with one loop -- you must call
31 iv_analysis_loop_init (loop) for it. All the other functions then work with
32 this loop. When you need to work with another loop, just call
33 iv_analysis_loop_init for it. When you no longer need iv analysis, call
34 iv_analysis_done () to clean up the memory.
36 The available functions are:
38 iv_analyze (insn, mode, reg, iv): Stores the description of the induction
39 variable corresponding to the use of register REG in INSN to IV, given
40 that REG has mode MODE. Returns true if REG is an induction variable
41 in INSN. false otherwise. If a use of REG is not found in INSN,
42 the following insns are scanned (so that we may call this function
43 on insns returned by get_condition).
44 iv_analyze_result (insn, def, iv): Stores to IV the description of the iv
45 corresponding to DEF, which is a register defined in INSN.
46 iv_analyze_expr (insn, mode, expr, iv): Stores to IV the description of iv
47 corresponding to expression EXPR evaluated at INSN. All registers used bu
48 EXPR must also be used in INSN. MODE is the mode of EXPR.
53 #include "coretypes.h"
59 #include "diagnostic-core.h"
64 #include "tree-ssa-loop-niter.h"
66 #include "function-abi.h"
68 /* Possible return values of iv_get_reaching_def. */
72 /* More than one reaching def, or reaching def that does not
76 /* The use is trivial invariant of the loop, i.e. is not changed
80 /* The use is reached by initial value and a value from the
81 previous iteration. */
84 /* The use has single dominating def. */
88 /* Information about a biv. */
93 unsigned regno
; /* The register of the biv. */
94 class rtx_iv iv
; /* Value of the biv. */
97 static bool clean_slate
= true;
99 static unsigned int iv_ref_table_size
= 0;
101 /* Table of rtx_ivs indexed by the df_ref uid field. */
102 static class rtx_iv
** iv_ref_table
;
104 /* Induction variable stored at the reference. */
105 #define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
106 #define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)
108 /* The current loop. */
110 static class loop
*current_loop
;
112 /* Hashtable helper. */
114 struct biv_entry_hasher
: free_ptr_hash
<biv_entry
>
116 typedef rtx_def
*compare_type
;
117 static inline hashval_t
hash (const biv_entry
*);
118 static inline bool equal (const biv_entry
*, const rtx_def
*);
121 /* Returns hash value for biv B. */
124 biv_entry_hasher::hash (const biv_entry
*b
)
129 /* Compares biv B and register R. */
132 biv_entry_hasher::equal (const biv_entry
*b
, const rtx_def
*r
)
134 return b
->regno
== REGNO (r
);
137 /* Bivs of the current loop. */
139 static hash_table
<biv_entry_hasher
> *bivs
;
141 static bool iv_analyze_op (rtx_insn
*, scalar_int_mode
, rtx
, class rtx_iv
*);
143 /* Return the RTX code corresponding to the IV extend code EXTEND. */
144 static inline enum rtx_code
145 iv_extend_to_rtx_code (enum iv_extend_code extend
)
153 case IV_UNKNOWN_EXTEND
:
159 /* Dumps information about IV to FILE. */
161 extern void dump_iv_info (FILE *, class rtx_iv
*);
163 dump_iv_info (FILE *file
, class rtx_iv
*iv
)
167 fprintf (file
, "not simple");
171 if (iv
->step
== const0_rtx
172 && !iv
->first_special
)
173 fprintf (file
, "invariant ");
175 print_rtl (file
, iv
->base
);
176 if (iv
->step
!= const0_rtx
)
178 fprintf (file
, " + ");
179 print_rtl (file
, iv
->step
);
180 fprintf (file
, " * iteration");
182 fprintf (file
, " (in %s)", GET_MODE_NAME (iv
->mode
));
184 if (iv
->mode
!= iv
->extend_mode
)
185 fprintf (file
, " %s to %s",
186 rtx_name
[iv_extend_to_rtx_code (iv
->extend
)],
187 GET_MODE_NAME (iv
->extend_mode
));
189 if (iv
->mult
!= const1_rtx
)
191 fprintf (file
, " * ");
192 print_rtl (file
, iv
->mult
);
194 if (iv
->delta
!= const0_rtx
)
196 fprintf (file
, " + ");
197 print_rtl (file
, iv
->delta
);
199 if (iv
->first_special
)
200 fprintf (file
, " (first special)");
204 check_iv_ref_table_size (void)
206 if (iv_ref_table_size
< DF_DEFS_TABLE_SIZE ())
208 unsigned int new_size
= DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
209 iv_ref_table
= XRESIZEVEC (class rtx_iv
*, iv_ref_table
, new_size
);
210 memset (&iv_ref_table
[iv_ref_table_size
], 0,
211 (new_size
- iv_ref_table_size
) * sizeof (class rtx_iv
*));
212 iv_ref_table_size
= new_size
;
217 /* Checks whether REG is a well-behaved register. */
220 simple_reg_p (rtx reg
)
224 if (GET_CODE (reg
) == SUBREG
)
226 if (!subreg_lowpart_p (reg
))
228 reg
= SUBREG_REG (reg
);
235 if (HARD_REGISTER_NUM_P (r
))
238 if (GET_MODE_CLASS (GET_MODE (reg
)) != MODE_INT
)
244 /* Clears the information about ivs stored in df. */
249 unsigned i
, n_defs
= DF_DEFS_TABLE_SIZE ();
252 check_iv_ref_table_size ();
253 for (i
= 0; i
< n_defs
; i
++)
255 iv
= iv_ref_table
[i
];
259 iv_ref_table
[i
] = NULL
;
267 /* Prepare the data for an induction variable analysis of a LOOP. */
270 iv_analysis_loop_init (class loop
*loop
)
274 /* Clear the information from the analysis of the previous loop. */
277 df_set_flags (DF_EQ_NOTES
+ DF_DEFER_INSN_RESCAN
);
278 bivs
= new hash_table
<biv_entry_hasher
> (10);
284 /* Get rid of the ud chains before processing the rescans. Then add
286 df_remove_problem (df_chain
);
287 df_process_deferred_rescans ();
288 df_set_flags (DF_RD_PRUNE_DEAD_DEFS
);
289 df_chain_add_problem (DF_UD_CHAIN
);
290 df_note_add_problem ();
291 df_analyze_loop (loop
);
293 df_dump_region (dump_file
);
295 check_iv_ref_table_size ();
298 /* Finds the definition of REG that dominates loop latch and stores
299 it to DEF. Returns false if there is not a single definition
300 dominating the latch. If REG has no definition in loop, DEF
301 is set to NULL and true is returned. */
304 latch_dominating_def (rtx reg
, df_ref
*def
)
306 df_ref single_rd
= NULL
, adef
;
307 unsigned regno
= REGNO (reg
);
308 class df_rd_bb_info
*bb_info
= DF_RD_BB_INFO (current_loop
->latch
);
310 for (adef
= DF_REG_DEF_CHAIN (regno
); adef
; adef
= DF_REF_NEXT_REG (adef
))
312 if (!bitmap_bit_p (df
->blocks_to_analyze
, DF_REF_BBNO (adef
))
313 || !bitmap_bit_p (&bb_info
->out
, DF_REF_ID (adef
)))
316 /* More than one reaching definition. */
320 if (!just_once_each_iteration_p (current_loop
, DF_REF_BB (adef
)))
330 /* Gets definition of REG reaching its use in INSN and stores it to DEF. */
332 static enum iv_grd_result
333 iv_get_reaching_def (rtx_insn
*insn
, rtx reg
, df_ref
*def
)
336 basic_block def_bb
, use_bb
;
341 if (!simple_reg_p (reg
))
343 if (GET_CODE (reg
) == SUBREG
)
344 reg
= SUBREG_REG (reg
);
345 gcc_assert (REG_P (reg
));
347 use
= df_find_use (insn
, reg
);
348 gcc_assert (use
!= NULL
);
350 if (!DF_REF_CHAIN (use
))
351 return GRD_INVARIANT
;
353 /* More than one reaching def. */
354 if (DF_REF_CHAIN (use
)->next
)
357 adef
= DF_REF_CHAIN (use
)->ref
;
359 /* We do not handle setting only part of the register. */
360 if (DF_REF_FLAGS (adef
) & DF_REF_READ_WRITE
)
363 def_insn
= DF_REF_INSN (adef
);
364 def_bb
= DF_REF_BB (adef
);
365 use_bb
= BLOCK_FOR_INSN (insn
);
367 if (use_bb
== def_bb
)
368 dom_p
= (DF_INSN_LUID (def_insn
) < DF_INSN_LUID (insn
));
370 dom_p
= dominated_by_p (CDI_DOMINATORS
, use_bb
, def_bb
);
375 return GRD_SINGLE_DOM
;
378 /* The definition does not dominate the use. This is still OK if
379 this may be a use of a biv, i.e. if the def_bb dominates loop
381 if (just_once_each_iteration_p (current_loop
, def_bb
))
382 return GRD_MAYBE_BIV
;
387 /* Sets IV to invariant CST in MODE. Always returns true (just for
388 consistency with other iv manipulation functions that may fail). */
391 iv_constant (class rtx_iv
*iv
, scalar_int_mode mode
, rtx cst
)
395 iv
->step
= const0_rtx
;
396 iv
->first_special
= false;
397 iv
->extend
= IV_UNKNOWN_EXTEND
;
398 iv
->extend_mode
= iv
->mode
;
399 iv
->delta
= const0_rtx
;
400 iv
->mult
= const1_rtx
;
405 /* Evaluates application of subreg to MODE on IV. */
408 iv_subreg (class rtx_iv
*iv
, scalar_int_mode mode
)
410 /* If iv is invariant, just calculate the new value. */
411 if (iv
->step
== const0_rtx
412 && !iv
->first_special
)
414 rtx val
= get_iv_value (iv
, const0_rtx
);
415 val
= lowpart_subreg (mode
, val
,
416 iv
->extend
== IV_UNKNOWN_EXTEND
417 ? iv
->mode
: iv
->extend_mode
);
420 iv
->extend
= IV_UNKNOWN_EXTEND
;
421 iv
->mode
= iv
->extend_mode
= mode
;
422 iv
->delta
= const0_rtx
;
423 iv
->mult
= const1_rtx
;
427 if (iv
->extend_mode
== mode
)
430 if (GET_MODE_BITSIZE (mode
) > GET_MODE_BITSIZE (iv
->mode
))
433 iv
->extend
= IV_UNKNOWN_EXTEND
;
436 iv
->base
= simplify_gen_binary (PLUS
, iv
->extend_mode
, iv
->delta
,
437 simplify_gen_binary (MULT
, iv
->extend_mode
,
438 iv
->base
, iv
->mult
));
439 iv
->step
= simplify_gen_binary (MULT
, iv
->extend_mode
, iv
->step
, iv
->mult
);
440 iv
->mult
= const1_rtx
;
441 iv
->delta
= const0_rtx
;
442 iv
->first_special
= false;
447 /* Evaluates application of EXTEND to MODE on IV. */
450 iv_extend (class rtx_iv
*iv
, enum iv_extend_code extend
, scalar_int_mode mode
)
452 /* If iv is invariant, just calculate the new value. */
453 if (iv
->step
== const0_rtx
454 && !iv
->first_special
)
456 rtx val
= get_iv_value (iv
, const0_rtx
);
457 if (iv
->extend_mode
!= iv
->mode
458 && iv
->extend
!= IV_UNKNOWN_EXTEND
459 && iv
->extend
!= extend
)
460 val
= lowpart_subreg (iv
->mode
, val
, iv
->extend_mode
);
461 val
= simplify_gen_unary (iv_extend_to_rtx_code (extend
), mode
,
464 ? iv
->extend_mode
: iv
->mode
);
466 iv
->extend
= IV_UNKNOWN_EXTEND
;
467 iv
->mode
= iv
->extend_mode
= mode
;
468 iv
->delta
= const0_rtx
;
469 iv
->mult
= const1_rtx
;
473 if (mode
!= iv
->extend_mode
)
476 if (iv
->extend
!= IV_UNKNOWN_EXTEND
477 && iv
->extend
!= extend
)
485 /* Evaluates negation of IV. */
488 iv_neg (class rtx_iv
*iv
)
490 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
492 iv
->base
= simplify_gen_unary (NEG
, iv
->extend_mode
,
493 iv
->base
, iv
->extend_mode
);
494 iv
->step
= simplify_gen_unary (NEG
, iv
->extend_mode
,
495 iv
->step
, iv
->extend_mode
);
499 iv
->delta
= simplify_gen_unary (NEG
, iv
->extend_mode
,
500 iv
->delta
, iv
->extend_mode
);
501 iv
->mult
= simplify_gen_unary (NEG
, iv
->extend_mode
,
502 iv
->mult
, iv
->extend_mode
);
508 /* Evaluates addition or subtraction (according to OP) of IV1 to IV0. */
511 iv_add (class rtx_iv
*iv0
, class rtx_iv
*iv1
, enum rtx_code op
)
513 scalar_int_mode mode
;
516 /* Extend the constant to extend_mode of the other operand if necessary. */
517 if (iv0
->extend
== IV_UNKNOWN_EXTEND
518 && iv0
->mode
== iv0
->extend_mode
519 && iv0
->step
== const0_rtx
520 && GET_MODE_SIZE (iv0
->extend_mode
) < GET_MODE_SIZE (iv1
->extend_mode
))
522 iv0
->extend_mode
= iv1
->extend_mode
;
523 iv0
->base
= simplify_gen_unary (ZERO_EXTEND
, iv0
->extend_mode
,
524 iv0
->base
, iv0
->mode
);
526 if (iv1
->extend
== IV_UNKNOWN_EXTEND
527 && iv1
->mode
== iv1
->extend_mode
528 && iv1
->step
== const0_rtx
529 && GET_MODE_SIZE (iv1
->extend_mode
) < GET_MODE_SIZE (iv0
->extend_mode
))
531 iv1
->extend_mode
= iv0
->extend_mode
;
532 iv1
->base
= simplify_gen_unary (ZERO_EXTEND
, iv1
->extend_mode
,
533 iv1
->base
, iv1
->mode
);
536 mode
= iv0
->extend_mode
;
537 if (mode
!= iv1
->extend_mode
)
540 if (iv0
->extend
== IV_UNKNOWN_EXTEND
541 && iv1
->extend
== IV_UNKNOWN_EXTEND
)
543 if (iv0
->mode
!= iv1
->mode
)
546 iv0
->base
= simplify_gen_binary (op
, mode
, iv0
->base
, iv1
->base
);
547 iv0
->step
= simplify_gen_binary (op
, mode
, iv0
->step
, iv1
->step
);
552 /* Handle addition of constant. */
553 if (iv1
->extend
== IV_UNKNOWN_EXTEND
555 && iv1
->step
== const0_rtx
)
557 iv0
->delta
= simplify_gen_binary (op
, mode
, iv0
->delta
, iv1
->base
);
561 if (iv0
->extend
== IV_UNKNOWN_EXTEND
563 && iv0
->step
== const0_rtx
)
571 iv0
->delta
= simplify_gen_binary (PLUS
, mode
, iv0
->delta
, arg
);
578 /* Evaluates multiplication of IV by constant CST. */
581 iv_mult (class rtx_iv
*iv
, rtx mby
)
583 scalar_int_mode mode
= iv
->extend_mode
;
585 if (GET_MODE (mby
) != VOIDmode
586 && GET_MODE (mby
) != mode
)
589 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
591 iv
->base
= simplify_gen_binary (MULT
, mode
, iv
->base
, mby
);
592 iv
->step
= simplify_gen_binary (MULT
, mode
, iv
->step
, mby
);
596 iv
->delta
= simplify_gen_binary (MULT
, mode
, iv
->delta
, mby
);
597 iv
->mult
= simplify_gen_binary (MULT
, mode
, iv
->mult
, mby
);
603 /* Evaluates shift of IV by constant CST. */
606 iv_shift (class rtx_iv
*iv
, rtx mby
)
608 scalar_int_mode mode
= iv
->extend_mode
;
610 if (GET_MODE (mby
) != VOIDmode
611 && GET_MODE (mby
) != mode
)
614 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
616 iv
->base
= simplify_gen_binary (ASHIFT
, mode
, iv
->base
, mby
);
617 iv
->step
= simplify_gen_binary (ASHIFT
, mode
, iv
->step
, mby
);
621 iv
->delta
= simplify_gen_binary (ASHIFT
, mode
, iv
->delta
, mby
);
622 iv
->mult
= simplify_gen_binary (ASHIFT
, mode
, iv
->mult
, mby
);
628 /* The recursive part of get_biv_step. Gets the value of the single value
629 defined by DEF wrto initial value of REG inside loop, in shape described
633 get_biv_step_1 (df_ref def
, scalar_int_mode outer_mode
, rtx reg
,
634 rtx
*inner_step
, scalar_int_mode
*inner_mode
,
635 enum iv_extend_code
*extend
,
638 rtx set
, rhs
, op0
= NULL_RTX
, op1
= NULL_RTX
;
641 rtx_insn
*insn
= DF_REF_INSN (def
);
643 enum iv_grd_result res
;
645 set
= single_set (insn
);
649 rhs
= find_reg_equal_equiv_note (insn
);
655 code
= GET_CODE (rhs
);
668 if (code
== PLUS
&& CONSTANT_P (op0
))
669 std::swap (op0
, op1
);
671 if (!simple_reg_p (op0
)
672 || !CONSTANT_P (op1
))
675 if (GET_MODE (rhs
) != outer_mode
)
677 /* ppc64 uses expressions like
679 (set x:SI (plus:SI (subreg:SI y:DI) 1)).
681 this is equivalent to
683 (set x':DI (plus:DI y:DI 1))
684 (set x:SI (subreg:SI (x':DI)). */
685 if (GET_CODE (op0
) != SUBREG
)
687 if (GET_MODE (SUBREG_REG (op0
)) != outer_mode
)
696 if (GET_MODE (rhs
) != outer_mode
)
700 if (!simple_reg_p (op0
))
710 if (GET_CODE (next
) == SUBREG
)
712 if (!subreg_lowpart_p (next
))
715 nextr
= SUBREG_REG (next
);
716 if (GET_MODE (nextr
) != outer_mode
)
722 res
= iv_get_reaching_def (insn
, nextr
, &next_def
);
724 if (res
== GRD_INVALID
|| res
== GRD_INVARIANT
)
727 if (res
== GRD_MAYBE_BIV
)
729 if (!rtx_equal_p (nextr
, reg
))
732 *inner_step
= const0_rtx
;
733 *extend
= IV_UNKNOWN_EXTEND
;
734 *inner_mode
= outer_mode
;
735 *outer_step
= const0_rtx
;
737 else if (!get_biv_step_1 (next_def
, outer_mode
, reg
,
738 inner_step
, inner_mode
, extend
,
742 if (GET_CODE (next
) == SUBREG
)
744 scalar_int_mode amode
;
745 if (!is_a
<scalar_int_mode
> (GET_MODE (next
), &amode
)
746 || GET_MODE_SIZE (amode
) > GET_MODE_SIZE (*inner_mode
))
750 *inner_step
= simplify_gen_binary (PLUS
, outer_mode
,
751 *inner_step
, *outer_step
);
752 *outer_step
= const0_rtx
;
753 *extend
= IV_UNKNOWN_EXTEND
;
764 if (*inner_mode
== outer_mode
765 /* See comment in previous switch. */
766 || GET_MODE (rhs
) != outer_mode
)
767 *inner_step
= simplify_gen_binary (code
, outer_mode
,
770 *outer_step
= simplify_gen_binary (code
, outer_mode
,
776 gcc_assert (GET_MODE (op0
) == *inner_mode
777 && *extend
== IV_UNKNOWN_EXTEND
778 && *outer_step
== const0_rtx
);
780 *extend
= (code
== SIGN_EXTEND
) ? IV_SIGN_EXTEND
: IV_ZERO_EXTEND
;
790 /* Gets the operation on register REG inside loop, in shape
792 OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP))
794 If the operation cannot be described in this shape, return false.
795 LAST_DEF is the definition of REG that dominates loop latch. */
798 get_biv_step (df_ref last_def
, scalar_int_mode outer_mode
, rtx reg
,
799 rtx
*inner_step
, scalar_int_mode
*inner_mode
,
800 enum iv_extend_code
*extend
, rtx
*outer_step
)
802 if (!get_biv_step_1 (last_def
, outer_mode
, reg
,
803 inner_step
, inner_mode
, extend
,
807 gcc_assert ((*inner_mode
== outer_mode
) != (*extend
!= IV_UNKNOWN_EXTEND
));
808 gcc_assert (*inner_mode
!= outer_mode
|| *outer_step
== const0_rtx
);
813 /* Records information that DEF is induction variable IV. */
816 record_iv (df_ref def
, class rtx_iv
*iv
)
818 class rtx_iv
*recorded_iv
= XNEW (class rtx_iv
);
821 check_iv_ref_table_size ();
822 DF_REF_IV_SET (def
, recorded_iv
);
825 /* If DEF was already analyzed for bivness, store the description of the biv to
826 IV and return true. Otherwise return false. */
829 analyzed_for_bivness_p (rtx def
, class rtx_iv
*iv
)
831 class biv_entry
*biv
= bivs
->find_with_hash (def
, REGNO (def
));
841 record_biv (rtx def
, class rtx_iv
*iv
)
843 class biv_entry
*biv
= XNEW (class biv_entry
);
844 biv_entry
**slot
= bivs
->find_slot_with_hash (def
, REGNO (def
), INSERT
);
846 biv
->regno
= REGNO (def
);
852 /* Determines whether DEF is a biv and if so, stores its description
853 to *IV. OUTER_MODE is the mode of DEF. */
856 iv_analyze_biv (scalar_int_mode outer_mode
, rtx def
, class rtx_iv
*iv
)
858 rtx inner_step
, outer_step
;
859 scalar_int_mode inner_mode
;
860 enum iv_extend_code extend
;
865 fprintf (dump_file
, "Analyzing ");
866 print_rtl (dump_file
, def
);
867 fprintf (dump_file
, " for bivness.\n");
872 if (!CONSTANT_P (def
))
875 return iv_constant (iv
, outer_mode
, def
);
878 if (!latch_dominating_def (def
, &last_def
))
881 fprintf (dump_file
, " not simple.\n");
886 return iv_constant (iv
, outer_mode
, def
);
888 if (analyzed_for_bivness_p (def
, iv
))
891 fprintf (dump_file
, " already analysed.\n");
892 return iv
->base
!= NULL_RTX
;
895 if (!get_biv_step (last_def
, outer_mode
, def
, &inner_step
, &inner_mode
,
896 &extend
, &outer_step
))
902 /* Loop transforms base to es (base + inner_step) + outer_step,
903 where es means extend of subreg between inner_mode and outer_mode.
904 The corresponding induction variable is
906 es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step */
908 iv
->base
= simplify_gen_binary (MINUS
, outer_mode
, def
, outer_step
);
909 iv
->step
= simplify_gen_binary (PLUS
, outer_mode
, inner_step
, outer_step
);
910 iv
->mode
= inner_mode
;
911 iv
->extend_mode
= outer_mode
;
913 iv
->mult
= const1_rtx
;
914 iv
->delta
= outer_step
;
915 iv
->first_special
= inner_mode
!= outer_mode
;
920 fprintf (dump_file
, " ");
921 dump_iv_info (dump_file
, iv
);
922 fprintf (dump_file
, "\n");
925 record_biv (def
, iv
);
926 return iv
->base
!= NULL_RTX
;
929 /* Analyzes expression RHS used at INSN and stores the result to *IV.
930 The mode of the induction variable is MODE. */
933 iv_analyze_expr (rtx_insn
*insn
, scalar_int_mode mode
, rtx rhs
,
937 rtx op0
= NULL_RTX
, op1
= NULL_RTX
;
938 class rtx_iv iv0
, iv1
;
939 enum rtx_code code
= GET_CODE (rhs
);
940 scalar_int_mode omode
= mode
;
945 gcc_assert (GET_MODE (rhs
) == mode
|| GET_MODE (rhs
) == VOIDmode
);
950 return iv_analyze_op (insn
, mode
, rhs
, iv
);
962 /* We don't know how many bits there are in a sign-extended constant. */
963 if (!is_a
<scalar_int_mode
> (GET_MODE (op0
), &omode
))
976 if (!CONSTANT_P (mby
))
977 std::swap (op0
, mby
);
978 if (!CONSTANT_P (mby
))
985 if (!CONSTANT_P (mby
))
994 && !iv_analyze_expr (insn
, omode
, op0
, &iv0
))
998 && !iv_analyze_expr (insn
, omode
, op1
, &iv1
))
1004 if (!iv_extend (&iv0
, IV_SIGN_EXTEND
, mode
))
1009 if (!iv_extend (&iv0
, IV_ZERO_EXTEND
, mode
))
1020 if (!iv_add (&iv0
, &iv1
, code
))
1025 if (!iv_mult (&iv0
, mby
))
1030 if (!iv_shift (&iv0
, mby
))
1039 return iv
->base
!= NULL_RTX
;
1042 /* Analyzes iv DEF and stores the result to *IV. */
1045 iv_analyze_def (df_ref def
, class rtx_iv
*iv
)
1047 rtx_insn
*insn
= DF_REF_INSN (def
);
1048 rtx reg
= DF_REF_REG (def
);
1053 fprintf (dump_file
, "Analyzing def of ");
1054 print_rtl (dump_file
, reg
);
1055 fprintf (dump_file
, " in insn ");
1056 print_rtl_single (dump_file
, insn
);
1059 check_iv_ref_table_size ();
1060 if (DF_REF_IV (def
))
1063 fprintf (dump_file
, " already analysed.\n");
1064 *iv
= *DF_REF_IV (def
);
1065 return iv
->base
!= NULL_RTX
;
1068 iv
->base
= NULL_RTX
;
1069 iv
->step
= NULL_RTX
;
1071 scalar_int_mode mode
;
1072 if (!REG_P (reg
) || !is_a
<scalar_int_mode
> (GET_MODE (reg
), &mode
))
1075 set
= single_set (insn
);
1079 if (!REG_P (SET_DEST (set
)))
1082 gcc_assert (SET_DEST (set
) == reg
);
1083 rhs
= find_reg_equal_equiv_note (insn
);
1085 rhs
= XEXP (rhs
, 0);
1087 rhs
= SET_SRC (set
);
1089 iv_analyze_expr (insn
, mode
, rhs
, iv
);
1090 record_iv (def
, iv
);
1094 print_rtl (dump_file
, reg
);
1095 fprintf (dump_file
, " in insn ");
1096 print_rtl_single (dump_file
, insn
);
1097 fprintf (dump_file
, " is ");
1098 dump_iv_info (dump_file
, iv
);
1099 fprintf (dump_file
, "\n");
1102 return iv
->base
!= NULL_RTX
;
1105 /* Analyzes operand OP of INSN and stores the result to *IV. MODE is the
1109 iv_analyze_op (rtx_insn
*insn
, scalar_int_mode mode
, rtx op
, class rtx_iv
*iv
)
1112 enum iv_grd_result res
;
1116 fprintf (dump_file
, "Analyzing operand ");
1117 print_rtl (dump_file
, op
);
1118 fprintf (dump_file
, " of insn ");
1119 print_rtl_single (dump_file
, insn
);
1122 if (function_invariant_p (op
))
1123 res
= GRD_INVARIANT
;
1124 else if (GET_CODE (op
) == SUBREG
)
1126 scalar_int_mode inner_mode
;
1127 if (!subreg_lowpart_p (op
)
1128 || !is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &inner_mode
))
1131 if (!iv_analyze_op (insn
, inner_mode
, SUBREG_REG (op
), iv
))
1134 return iv_subreg (iv
, mode
);
1138 res
= iv_get_reaching_def (insn
, op
, &def
);
1139 if (res
== GRD_INVALID
)
1142 fprintf (dump_file
, " not simple.\n");
1147 if (res
== GRD_INVARIANT
)
1149 iv_constant (iv
, mode
, op
);
1153 fprintf (dump_file
, " ");
1154 dump_iv_info (dump_file
, iv
);
1155 fprintf (dump_file
, "\n");
1160 if (res
== GRD_MAYBE_BIV
)
1161 return iv_analyze_biv (mode
, op
, iv
);
1163 return iv_analyze_def (def
, iv
);
1166 /* Analyzes value VAL at INSN and stores the result to *IV. MODE is the
1170 iv_analyze (rtx_insn
*insn
, scalar_int_mode mode
, rtx val
, class rtx_iv
*iv
)
1174 /* We must find the insn in that val is used, so that we get to UD chains.
1175 Since the function is sometimes called on result of get_condition,
1176 this does not necessarily have to be directly INSN; scan also the
1178 if (simple_reg_p (val
))
1180 if (GET_CODE (val
) == SUBREG
)
1181 reg
= SUBREG_REG (val
);
1185 while (!df_find_use (insn
, reg
))
1186 insn
= NEXT_INSN (insn
);
1189 return iv_analyze_op (insn
, mode
, val
, iv
);
1192 /* Analyzes definition of DEF in INSN and stores the result to IV. */
1195 iv_analyze_result (rtx_insn
*insn
, rtx def
, class rtx_iv
*iv
)
1199 adef
= df_find_def (insn
, def
);
1203 return iv_analyze_def (adef
, iv
);
1206 /* Checks whether definition of register REG in INSN is a basic induction
1207 variable. MODE is the mode of REG.
1209 IV analysis must have been initialized (via a call to
1210 iv_analysis_loop_init) for this function to produce a result. */
1213 biv_p (rtx_insn
*insn
, scalar_int_mode mode
, rtx reg
)
1216 df_ref def
, last_def
;
1218 if (!simple_reg_p (reg
))
1221 def
= df_find_def (insn
, reg
);
1222 gcc_assert (def
!= NULL
);
1223 if (!latch_dominating_def (reg
, &last_def
))
1225 if (last_def
!= def
)
1228 if (!iv_analyze_biv (mode
, reg
, &iv
))
1231 return iv
.step
!= const0_rtx
;
1234 /* Calculates value of IV at ITERATION-th iteration. */
1237 get_iv_value (class rtx_iv
*iv
, rtx iteration
)
1241 /* We would need to generate some if_then_else patterns, and so far
1242 it is not needed anywhere. */
1243 gcc_assert (!iv
->first_special
);
1245 if (iv
->step
!= const0_rtx
&& iteration
!= const0_rtx
)
1246 val
= simplify_gen_binary (PLUS
, iv
->extend_mode
, iv
->base
,
1247 simplify_gen_binary (MULT
, iv
->extend_mode
,
1248 iv
->step
, iteration
));
1252 if (iv
->extend_mode
== iv
->mode
)
1255 val
= lowpart_subreg (iv
->mode
, val
, iv
->extend_mode
);
1257 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
1260 val
= simplify_gen_unary (iv_extend_to_rtx_code (iv
->extend
),
1261 iv
->extend_mode
, val
, iv
->mode
);
1262 val
= simplify_gen_binary (PLUS
, iv
->extend_mode
, iv
->delta
,
1263 simplify_gen_binary (MULT
, iv
->extend_mode
,
1269 /* Free the data for an induction variable analysis. */
1272 iv_analysis_done (void)
1278 df_finish_pass (true);
1281 free (iv_ref_table
);
1282 iv_ref_table
= NULL
;
1283 iv_ref_table_size
= 0;
1287 /* Computes inverse to X modulo (1 << MOD). */
1290 inverse (uint64_t x
, int mod
)
1293 ((uint64_t) 1 << (mod
- 1) << 1) - 1;
1297 for (i
= 0; i
< mod
- 1; i
++)
1299 rslt
= (rslt
* x
) & mask
;
1306 /* Checks whether any register in X is in set ALT. */
1309 altered_reg_used (const_rtx x
, bitmap alt
)
1311 subrtx_iterator::array_type array
;
1312 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
1314 const_rtx x
= *iter
;
1315 if (REG_P (x
) && REGNO_REG_SET_P (alt
, REGNO (x
)))
1321 /* Marks registers altered by EXPR in set ALT. */
1324 mark_altered (rtx expr
, const_rtx by ATTRIBUTE_UNUSED
, void *alt
)
1326 if (GET_CODE (expr
) == SUBREG
)
1327 expr
= SUBREG_REG (expr
);
1331 SET_REGNO_REG_SET ((bitmap
) alt
, REGNO (expr
));
1334 /* Checks whether RHS is simple enough to process. */
1337 simple_rhs_p (rtx rhs
)
1341 if (function_invariant_p (rhs
)
1342 || (REG_P (rhs
) && !HARD_REGISTER_P (rhs
)))
1345 switch (GET_CODE (rhs
))
1350 op0
= XEXP (rhs
, 0);
1351 op1
= XEXP (rhs
, 1);
1352 /* Allow reg OP const and reg OP reg. */
1353 if (!(REG_P (op0
) && !HARD_REGISTER_P (op0
))
1354 && !function_invariant_p (op0
))
1356 if (!(REG_P (op1
) && !HARD_REGISTER_P (op1
))
1357 && !function_invariant_p (op1
))
1366 op0
= XEXP (rhs
, 0);
1367 op1
= XEXP (rhs
, 1);
1368 /* Allow reg OP const. */
1369 if (!(REG_P (op0
) && !HARD_REGISTER_P (op0
)))
1371 if (!function_invariant_p (op1
))
1381 /* If REGNO has a single definition, return its known value, otherwise return
1385 find_single_def_src (unsigned int regno
)
1393 adef
= DF_REG_DEF_CHAIN (regno
);
1394 if (adef
== NULL
|| DF_REF_NEXT_REG (adef
) != NULL
1395 || DF_REF_IS_ARTIFICIAL (adef
))
1398 set
= single_set (DF_REF_INSN (adef
));
1399 if (set
== NULL
|| !REG_P (SET_DEST (set
))
1400 || REGNO (SET_DEST (set
)) != regno
)
1403 note
= find_reg_equal_equiv_note (DF_REF_INSN (adef
));
1405 if (note
&& function_invariant_p (XEXP (note
, 0)))
1407 src
= XEXP (note
, 0);
1410 src
= SET_SRC (set
);
1414 regno
= REGNO (src
);
1419 if (!function_invariant_p (src
))
1425 /* If any registers in *EXPR that have a single definition, try to replace
1426 them with the known-equivalent values. */
1429 replace_single_def_regs (rtx
*expr
)
1431 subrtx_var_iterator::array_type array
;
1433 FOR_EACH_SUBRTX_VAR (iter
, array
, *expr
, NONCONST
)
1437 if (rtx new_x
= find_single_def_src (REGNO (x
)))
1439 *expr
= simplify_replace_rtx (*expr
, x
, new_x
);
1445 /* A subroutine of simplify_using_initial_values, this function examines INSN
1446 to see if it contains a suitable set that we can use to make a replacement.
1447 If it is suitable, return true and set DEST and SRC to the lhs and rhs of
1448 the set; return false otherwise. */
1451 suitable_set_for_replacement (rtx_insn
*insn
, rtx
*dest
, rtx
*src
)
1453 rtx set
= single_set (insn
);
1454 rtx lhs
= NULL_RTX
, rhs
;
1459 lhs
= SET_DEST (set
);
1463 rhs
= find_reg_equal_equiv_note (insn
);
1465 rhs
= XEXP (rhs
, 0);
1467 rhs
= SET_SRC (set
);
1469 if (!simple_rhs_p (rhs
))
1477 /* Using the data returned by suitable_set_for_replacement, replace DEST
1478 with SRC in *EXPR and return the new expression. Also call
1479 replace_single_def_regs if the replacement changed something. */
1481 replace_in_expr (rtx
*expr
, rtx dest
, rtx src
)
1484 *expr
= simplify_replace_rtx (*expr
, dest
, src
);
1487 replace_single_def_regs (expr
);
1490 /* Checks whether A implies B. */
1493 implies_p (rtx a
, rtx b
)
1495 rtx op0
, op1
, opb0
, opb1
;
1498 if (rtx_equal_p (a
, b
))
1501 if (GET_CODE (a
) == EQ
)
1507 || (GET_CODE (op0
) == SUBREG
1508 && REG_P (SUBREG_REG (op0
))))
1510 rtx r
= simplify_replace_rtx (b
, op0
, op1
);
1511 if (r
== const_true_rtx
)
1516 || (GET_CODE (op1
) == SUBREG
1517 && REG_P (SUBREG_REG (op1
))))
1519 rtx r
= simplify_replace_rtx (b
, op1
, op0
);
1520 if (r
== const_true_rtx
)
1525 if (b
== const_true_rtx
)
1528 if ((GET_RTX_CLASS (GET_CODE (a
)) != RTX_COMM_COMPARE
1529 && GET_RTX_CLASS (GET_CODE (a
)) != RTX_COMPARE
)
1530 || (GET_RTX_CLASS (GET_CODE (b
)) != RTX_COMM_COMPARE
1531 && GET_RTX_CLASS (GET_CODE (b
)) != RTX_COMPARE
))
1539 mode
= GET_MODE (op0
);
1540 if (mode
!= GET_MODE (opb0
))
1542 else if (mode
== VOIDmode
)
1544 mode
= GET_MODE (op1
);
1545 if (mode
!= GET_MODE (opb1
))
1549 /* A < B implies A + 1 <= B. */
1550 if ((GET_CODE (a
) == GT
|| GET_CODE (a
) == LT
)
1551 && (GET_CODE (b
) == GE
|| GET_CODE (b
) == LE
))
1554 if (GET_CODE (a
) == GT
)
1555 std::swap (op0
, op1
);
1557 if (GET_CODE (b
) == GE
)
1558 std::swap (opb0
, opb1
);
1560 if (SCALAR_INT_MODE_P (mode
)
1561 && rtx_equal_p (op1
, opb1
)
1562 && simplify_gen_binary (MINUS
, mode
, opb0
, op0
) == const1_rtx
)
1567 /* A < B or A > B imply A != B. TODO: Likewise
1568 A + n < B implies A != B + n if neither wraps. */
1569 if (GET_CODE (b
) == NE
1570 && (GET_CODE (a
) == GT
|| GET_CODE (a
) == GTU
1571 || GET_CODE (a
) == LT
|| GET_CODE (a
) == LTU
))
1573 if (rtx_equal_p (op0
, opb0
)
1574 && rtx_equal_p (op1
, opb1
))
1578 /* For unsigned comparisons, A != 0 implies A > 0 and A >= 1. */
1579 if (GET_CODE (a
) == NE
1580 && op1
== const0_rtx
)
1582 if ((GET_CODE (b
) == GTU
1583 && opb1
== const0_rtx
)
1584 || (GET_CODE (b
) == GEU
1585 && opb1
== const1_rtx
))
1586 return rtx_equal_p (op0
, opb0
);
1589 /* A != N is equivalent to A - (N + 1) <u -1. */
1590 if (GET_CODE (a
) == NE
1591 && CONST_INT_P (op1
)
1592 && GET_CODE (b
) == LTU
1593 && opb1
== constm1_rtx
1594 && GET_CODE (opb0
) == PLUS
1595 && CONST_INT_P (XEXP (opb0
, 1))
1596 /* Avoid overflows. */
1597 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1598 != ((unsigned HOST_WIDE_INT
)1
1599 << (HOST_BITS_PER_WIDE_INT
- 1)) - 1)
1600 && INTVAL (XEXP (opb0
, 1)) + 1 == -INTVAL (op1
))
1601 return rtx_equal_p (op0
, XEXP (opb0
, 0));
1603 /* Likewise, A != N implies A - N > 0. */
1604 if (GET_CODE (a
) == NE
1605 && CONST_INT_P (op1
))
1607 if (GET_CODE (b
) == GTU
1608 && GET_CODE (opb0
) == PLUS
1609 && opb1
== const0_rtx
1610 && CONST_INT_P (XEXP (opb0
, 1))
1611 /* Avoid overflows. */
1612 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1613 != (HOST_WIDE_INT_1U
<< (HOST_BITS_PER_WIDE_INT
- 1)))
1614 && rtx_equal_p (XEXP (opb0
, 0), op0
))
1615 return INTVAL (op1
) == -INTVAL (XEXP (opb0
, 1));
1616 if (GET_CODE (b
) == GEU
1617 && GET_CODE (opb0
) == PLUS
1618 && opb1
== const1_rtx
1619 && CONST_INT_P (XEXP (opb0
, 1))
1620 /* Avoid overflows. */
1621 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1622 != (HOST_WIDE_INT_1U
<< (HOST_BITS_PER_WIDE_INT
- 1)))
1623 && rtx_equal_p (XEXP (opb0
, 0), op0
))
1624 return INTVAL (op1
) == -INTVAL (XEXP (opb0
, 1));
1627 /* A >s X, where X is positive, implies A <u Y, if Y is negative. */
1628 if ((GET_CODE (a
) == GT
|| GET_CODE (a
) == GE
)
1629 && CONST_INT_P (op1
)
1630 && ((GET_CODE (a
) == GT
&& op1
== constm1_rtx
)
1631 || INTVAL (op1
) >= 0)
1632 && GET_CODE (b
) == LTU
1633 && CONST_INT_P (opb1
)
1634 && rtx_equal_p (op0
, opb0
))
1635 return INTVAL (opb1
) < 0;
1640 /* Canonicalizes COND so that
1642 (1) Ensure that operands are ordered according to
1643 swap_commutative_operands_p.
1644 (2) (LE x const) will be replaced with (LT x <const+1>) and similarly
1645 for GE, GEU, and LEU. */
1648 canon_condition (rtx cond
)
1654 code
= GET_CODE (cond
);
1655 op0
= XEXP (cond
, 0);
1656 op1
= XEXP (cond
, 1);
1658 if (swap_commutative_operands_p (op0
, op1
))
1660 code
= swap_condition (code
);
1661 std::swap (op0
, op1
);
1664 mode
= GET_MODE (op0
);
1665 if (mode
== VOIDmode
)
1666 mode
= GET_MODE (op1
);
1667 gcc_assert (mode
!= VOIDmode
);
1669 if (CONST_SCALAR_INT_P (op1
) && GET_MODE_CLASS (mode
) != MODE_CC
)
1671 rtx_mode_t
const_val (op1
, mode
);
1676 if (wi::ne_p (const_val
, wi::max_value (mode
, SIGNED
)))
1679 op1
= immed_wide_int_const (wi::add (const_val
, 1), mode
);
1684 if (wi::ne_p (const_val
, wi::min_value (mode
, SIGNED
)))
1687 op1
= immed_wide_int_const (wi::sub (const_val
, 1), mode
);
1692 if (wi::ne_p (const_val
, -1))
1695 op1
= immed_wide_int_const (wi::add (const_val
, 1), mode
);
1700 if (wi::ne_p (const_val
, 0))
1703 op1
= immed_wide_int_const (wi::sub (const_val
, 1), mode
);
1712 if (op0
!= XEXP (cond
, 0)
1713 || op1
!= XEXP (cond
, 1)
1714 || code
!= GET_CODE (cond
)
1715 || GET_MODE (cond
) != SImode
)
1716 cond
= gen_rtx_fmt_ee (code
, SImode
, op0
, op1
);
1721 /* Reverses CONDition; returns NULL if we cannot. */
1724 reversed_condition (rtx cond
)
1726 enum rtx_code reversed
;
1727 reversed
= reversed_comparison_code (cond
, NULL
);
1728 if (reversed
== UNKNOWN
)
1731 return gen_rtx_fmt_ee (reversed
,
1732 GET_MODE (cond
), XEXP (cond
, 0),
1736 /* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the
1737 set of altered regs. */
1740 simplify_using_condition (rtx cond
, rtx
*expr
, regset altered
)
1742 rtx rev
, reve
, exp
= *expr
;
1744 /* If some register gets altered later, we do not really speak about its
1745 value at the time of comparison. */
1746 if (altered
&& altered_reg_used (cond
, altered
))
1749 if (GET_CODE (cond
) == EQ
1750 && REG_P (XEXP (cond
, 0)) && CONSTANT_P (XEXP (cond
, 1)))
1752 *expr
= simplify_replace_rtx (*expr
, XEXP (cond
, 0), XEXP (cond
, 1));
1756 if (!COMPARISON_P (exp
))
1759 rev
= reversed_condition (cond
);
1760 reve
= reversed_condition (exp
);
1762 cond
= canon_condition (cond
);
1763 exp
= canon_condition (exp
);
1765 rev
= canon_condition (rev
);
1767 reve
= canon_condition (reve
);
1769 if (rtx_equal_p (exp
, cond
))
1771 *expr
= const_true_rtx
;
1775 if (rev
&& rtx_equal_p (exp
, rev
))
1781 if (implies_p (cond
, exp
))
1783 *expr
= const_true_rtx
;
1787 if (reve
&& implies_p (cond
, reve
))
1793 /* A proof by contradiction. If *EXPR implies (not cond), *EXPR must
1795 if (rev
&& implies_p (exp
, rev
))
1801 /* Similarly, If (not *EXPR) implies (not cond), *EXPR must be true. */
1802 if (rev
&& reve
&& implies_p (reve
, rev
))
1804 *expr
= const_true_rtx
;
1808 /* We would like to have some other tests here. TODO. */
1813 /* Use relationship between A and *B to eventually eliminate *B.
1814 OP is the operation we consider. */
1817 eliminate_implied_condition (enum rtx_code op
, rtx a
, rtx
*b
)
1822 /* If A implies *B, we may replace *B by true. */
1823 if (implies_p (a
, *b
))
1824 *b
= const_true_rtx
;
1828 /* If *B implies A, we may replace *B by false. */
1829 if (implies_p (*b
, a
))
1838 /* Eliminates the conditions in TAIL that are implied by HEAD. OP is the
1839 operation we consider. */
1842 eliminate_implied_conditions (enum rtx_code op
, rtx
*head
, rtx tail
)
1846 for (elt
= tail
; elt
; elt
= XEXP (elt
, 1))
1847 eliminate_implied_condition (op
, *head
, &XEXP (elt
, 0));
1848 for (elt
= tail
; elt
; elt
= XEXP (elt
, 1))
1849 eliminate_implied_condition (op
, XEXP (elt
, 0), head
);
1852 /* Simplifies *EXPR using initial values at the start of the LOOP. If *EXPR
1853 is a list, its elements are assumed to be combined using OP. */
1856 simplify_using_initial_values (class loop
*loop
, enum rtx_code op
, rtx
*expr
)
1858 bool expression_valid
;
1859 rtx head
, tail
, last_valid_expr
;
1860 rtx_expr_list
*cond_list
;
1863 regset altered
, this_altered
;
1869 if (CONSTANT_P (*expr
))
1872 if (GET_CODE (*expr
) == EXPR_LIST
)
1874 head
= XEXP (*expr
, 0);
1875 tail
= XEXP (*expr
, 1);
1877 eliminate_implied_conditions (op
, &head
, tail
);
1882 neutral
= const_true_rtx
;
1887 neutral
= const0_rtx
;
1888 aggr
= const_true_rtx
;
1895 simplify_using_initial_values (loop
, UNKNOWN
, &head
);
1898 XEXP (*expr
, 0) = aggr
;
1899 XEXP (*expr
, 1) = NULL_RTX
;
1902 else if (head
== neutral
)
1905 simplify_using_initial_values (loop
, op
, expr
);
1908 simplify_using_initial_values (loop
, op
, &tail
);
1910 if (tail
&& XEXP (tail
, 0) == aggr
)
1916 XEXP (*expr
, 0) = head
;
1917 XEXP (*expr
, 1) = tail
;
1921 gcc_assert (op
== UNKNOWN
);
1923 replace_single_def_regs (expr
);
1924 if (CONSTANT_P (*expr
))
1927 e
= loop_preheader_edge (loop
);
1928 if (e
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
1931 altered
= ALLOC_REG_SET (®_obstack
);
1932 this_altered
= ALLOC_REG_SET (®_obstack
);
1934 expression_valid
= true;
1935 last_valid_expr
= *expr
;
1939 insn
= BB_END (e
->src
);
1940 if (any_condjump_p (insn
))
1942 rtx cond
= get_condition (BB_END (e
->src
), NULL
, false, true);
1944 if (cond
&& (e
->flags
& EDGE_FALLTHRU
))
1945 cond
= reversed_condition (cond
);
1949 simplify_using_condition (cond
, expr
, altered
);
1953 if (CONSTANT_P (*expr
))
1955 for (note
= cond_list
; note
; note
= XEXP (note
, 1))
1957 simplify_using_condition (XEXP (note
, 0), expr
, altered
);
1958 if (CONSTANT_P (*expr
))
1962 cond_list
= alloc_EXPR_LIST (0, cond
, cond_list
);
1966 FOR_BB_INSNS_REVERSE (e
->src
, insn
)
1974 CLEAR_REG_SET (this_altered
);
1975 note_stores (insn
, mark_altered
, this_altered
);
1978 /* Kill all registers that might be clobbered by the call.
1979 We don't track modes of hard registers, so we need to be
1980 conservative and assume that partial kills are full kills. */
1981 function_abi callee_abi
= insn_callee_abi (insn
);
1982 IOR_REG_SET_HRS (this_altered
,
1983 callee_abi
.full_and_partial_reg_clobbers ());
1986 if (suitable_set_for_replacement (insn
, &dest
, &src
))
1988 rtx_expr_list
**pnote
, **pnote_next
;
1990 replace_in_expr (expr
, dest
, src
);
1991 if (CONSTANT_P (*expr
))
1994 for (pnote
= &cond_list
; *pnote
; pnote
= pnote_next
)
1996 rtx_expr_list
*note
= *pnote
;
1997 rtx old_cond
= XEXP (note
, 0);
1999 pnote_next
= (rtx_expr_list
**)&XEXP (note
, 1);
2000 replace_in_expr (&XEXP (note
, 0), dest
, src
);
2002 /* We can no longer use a condition that has been simplified
2003 to a constant, and simplify_using_condition will abort if
2005 if (CONSTANT_P (XEXP (note
, 0)))
2007 *pnote
= *pnote_next
;
2009 free_EXPR_LIST_node (note
);
2011 /* Retry simplifications with this condition if either the
2012 expression or the condition changed. */
2013 else if (old_cond
!= XEXP (note
, 0) || old
!= *expr
)
2014 simplify_using_condition (XEXP (note
, 0), expr
, altered
);
2019 rtx_expr_list
**pnote
, **pnote_next
;
2021 /* If we did not use this insn to make a replacement, any overlap
2022 between stores in this insn and our expression will cause the
2023 expression to become invalid. */
2024 if (altered_reg_used (*expr
, this_altered
))
2027 /* Likewise for the conditions. */
2028 for (pnote
= &cond_list
; *pnote
; pnote
= pnote_next
)
2030 rtx_expr_list
*note
= *pnote
;
2031 rtx old_cond
= XEXP (note
, 0);
2033 pnote_next
= (rtx_expr_list
**)&XEXP (note
, 1);
2034 if (altered_reg_used (old_cond
, this_altered
))
2036 *pnote
= *pnote_next
;
2038 free_EXPR_LIST_node (note
);
2043 if (CONSTANT_P (*expr
))
2046 IOR_REG_SET (altered
, this_altered
);
2048 /* If the expression now contains regs that have been altered, we
2049 can't return it to the caller. However, it is still valid for
2050 further simplification, so keep searching to see if we can
2051 eventually turn it into a constant. */
2052 if (altered_reg_used (*expr
, altered
))
2053 expression_valid
= false;
2054 if (expression_valid
)
2055 last_valid_expr
= *expr
;
2058 if (!single_pred_p (e
->src
)
2059 || single_pred (e
->src
) == ENTRY_BLOCK_PTR_FOR_FN (cfun
))
2061 e
= single_pred_edge (e
->src
);
2065 free_EXPR_LIST_list (&cond_list
);
2066 if (!CONSTANT_P (*expr
))
2067 *expr
= last_valid_expr
;
2068 FREE_REG_SET (altered
);
2069 FREE_REG_SET (this_altered
);
2072 /* Transforms invariant IV into MODE. Adds assumptions based on the fact
2073 that IV occurs as left operands of comparison COND and its signedness
2074 is SIGNED_P to DESC. */
2077 shorten_into_mode (class rtx_iv
*iv
, scalar_int_mode mode
,
2078 enum rtx_code cond
, bool signed_p
, class niter_desc
*desc
)
2080 rtx mmin
, mmax
, cond_over
, cond_under
;
2082 get_mode_bounds (mode
, signed_p
, iv
->extend_mode
, &mmin
, &mmax
);
2083 cond_under
= simplify_gen_relational (LT
, SImode
, iv
->extend_mode
,
2085 cond_over
= simplify_gen_relational (GT
, SImode
, iv
->extend_mode
,
2094 if (cond_under
!= const0_rtx
)
2096 alloc_EXPR_LIST (0, cond_under
, desc
->infinite
);
2097 if (cond_over
!= const0_rtx
)
2098 desc
->noloop_assumptions
=
2099 alloc_EXPR_LIST (0, cond_over
, desc
->noloop_assumptions
);
2106 if (cond_over
!= const0_rtx
)
2108 alloc_EXPR_LIST (0, cond_over
, desc
->infinite
);
2109 if (cond_under
!= const0_rtx
)
2110 desc
->noloop_assumptions
=
2111 alloc_EXPR_LIST (0, cond_under
, desc
->noloop_assumptions
);
2115 if (cond_over
!= const0_rtx
)
2117 alloc_EXPR_LIST (0, cond_over
, desc
->infinite
);
2118 if (cond_under
!= const0_rtx
)
2120 alloc_EXPR_LIST (0, cond_under
, desc
->infinite
);
2128 iv
->extend
= signed_p
? IV_SIGN_EXTEND
: IV_ZERO_EXTEND
;
2131 /* Transforms IV0 and IV1 compared by COND so that they are both compared as
2132 subregs of the same mode if possible (sometimes it is necessary to add
2133 some assumptions to DESC). */
2136 canonicalize_iv_subregs (class rtx_iv
*iv0
, class rtx_iv
*iv1
,
2137 enum rtx_code cond
, class niter_desc
*desc
)
2139 scalar_int_mode comp_mode
;
2142 /* If the ivs behave specially in the first iteration, or are
2143 added/multiplied after extending, we ignore them. */
2144 if (iv0
->first_special
|| iv0
->mult
!= const1_rtx
|| iv0
->delta
!= const0_rtx
)
2146 if (iv1
->first_special
|| iv1
->mult
!= const1_rtx
|| iv1
->delta
!= const0_rtx
)
2149 /* If there is some extend, it must match signedness of the comparison. */
2154 if (iv0
->extend
== IV_ZERO_EXTEND
2155 || iv1
->extend
== IV_ZERO_EXTEND
)
2162 if (iv0
->extend
== IV_SIGN_EXTEND
2163 || iv1
->extend
== IV_SIGN_EXTEND
)
2169 if (iv0
->extend
!= IV_UNKNOWN_EXTEND
2170 && iv1
->extend
!= IV_UNKNOWN_EXTEND
2171 && iv0
->extend
!= iv1
->extend
)
2175 if (iv0
->extend
!= IV_UNKNOWN_EXTEND
)
2176 signed_p
= iv0
->extend
== IV_SIGN_EXTEND
;
2177 if (iv1
->extend
!= IV_UNKNOWN_EXTEND
)
2178 signed_p
= iv1
->extend
== IV_SIGN_EXTEND
;
2185 /* Values of both variables should be computed in the same mode. These
2186 might indeed be different, if we have comparison like
2188 (compare (subreg:SI (iv0)) (subreg:SI (iv1)))
2190 and iv0 and iv1 are both ivs iterating in SI mode, but calculated
2191 in different modes. This does not seem impossible to handle, but
2192 it hardly ever occurs in practice.
2194 The only exception is the case when one of operands is invariant.
2195 For example pentium 3 generates comparisons like
2196 (lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we
2197 definitely do not want this prevent the optimization. */
2198 comp_mode
= iv0
->extend_mode
;
2199 if (GET_MODE_BITSIZE (comp_mode
) < GET_MODE_BITSIZE (iv1
->extend_mode
))
2200 comp_mode
= iv1
->extend_mode
;
2202 if (iv0
->extend_mode
!= comp_mode
)
2204 if (iv0
->mode
!= iv0
->extend_mode
2205 || iv0
->step
!= const0_rtx
)
2208 iv0
->base
= simplify_gen_unary (signed_p
? SIGN_EXTEND
: ZERO_EXTEND
,
2209 comp_mode
, iv0
->base
, iv0
->mode
);
2210 iv0
->extend_mode
= comp_mode
;
2213 if (iv1
->extend_mode
!= comp_mode
)
2215 if (iv1
->mode
!= iv1
->extend_mode
2216 || iv1
->step
!= const0_rtx
)
2219 iv1
->base
= simplify_gen_unary (signed_p
? SIGN_EXTEND
: ZERO_EXTEND
,
2220 comp_mode
, iv1
->base
, iv1
->mode
);
2221 iv1
->extend_mode
= comp_mode
;
2224 /* Check that both ivs belong to a range of a single mode. If one of the
2225 operands is an invariant, we may need to shorten it into the common
2227 if (iv0
->mode
== iv0
->extend_mode
2228 && iv0
->step
== const0_rtx
2229 && iv0
->mode
!= iv1
->mode
)
2230 shorten_into_mode (iv0
, iv1
->mode
, cond
, signed_p
, desc
);
2232 if (iv1
->mode
== iv1
->extend_mode
2233 && iv1
->step
== const0_rtx
2234 && iv0
->mode
!= iv1
->mode
)
2235 shorten_into_mode (iv1
, iv0
->mode
, swap_condition (cond
), signed_p
, desc
);
2237 if (iv0
->mode
!= iv1
->mode
)
2240 desc
->mode
= iv0
->mode
;
2241 desc
->signed_p
= signed_p
;
2246 /* Tries to estimate the maximum number of iterations in LOOP, and return the
2247 result. This function is called from iv_number_of_iterations with
2248 a number of fields in DESC already filled in. OLD_NITER is the original
2249 expression for the number of iterations, before we tried to simplify it. */
2252 determine_max_iter (class loop
*loop
, class niter_desc
*desc
, rtx old_niter
)
2254 rtx niter
= desc
->niter_expr
;
2255 rtx mmin
, mmax
, cmp
;
2257 uint64_t andmax
= 0;
2259 /* We used to look for constant operand 0 of AND,
2260 but canonicalization should always make this impossible. */
2261 gcc_checking_assert (GET_CODE (niter
) != AND
2262 || !CONST_INT_P (XEXP (niter
, 0)));
2264 if (GET_CODE (niter
) == AND
2265 && CONST_INT_P (XEXP (niter
, 1)))
2267 andmax
= UINTVAL (XEXP (niter
, 1));
2268 niter
= XEXP (niter
, 0);
2271 get_mode_bounds (desc
->mode
, desc
->signed_p
, desc
->mode
, &mmin
, &mmax
);
2272 nmax
= UINTVAL (mmax
) - UINTVAL (mmin
);
2274 if (GET_CODE (niter
) == UDIV
)
2276 if (!CONST_INT_P (XEXP (niter
, 1)))
2278 inc
= INTVAL (XEXP (niter
, 1));
2279 niter
= XEXP (niter
, 0);
2284 /* We could use a binary search here, but for now improving the upper
2285 bound by just one eliminates one important corner case. */
2286 cmp
= simplify_gen_relational (desc
->signed_p
? LT
: LTU
, VOIDmode
,
2287 desc
->mode
, old_niter
, mmax
);
2288 simplify_using_initial_values (loop
, UNKNOWN
, &cmp
);
2289 if (cmp
== const_true_rtx
)
2294 fprintf (dump_file
, ";; improved upper bound by one.\n");
2298 nmax
= MIN (nmax
, andmax
);
2300 fprintf (dump_file
, ";; Determined upper bound %" PRId64
".\n",
2305 /* Computes number of iterations of the CONDITION in INSN in LOOP and stores
2306 the result into DESC. Very similar to determine_number_of_iterations
2307 (basically its rtl version), complicated by things like subregs. */
2310 iv_number_of_iterations (class loop
*loop
, rtx_insn
*insn
, rtx condition
,
2311 class niter_desc
*desc
)
2313 rtx op0
, op1
, delta
, step
, bound
, may_xform
, tmp
, tmp0
, tmp1
;
2314 class rtx_iv iv0
, iv1
;
2315 rtx assumption
, may_not_xform
;
2317 machine_mode nonvoid_mode
;
2318 scalar_int_mode comp_mode
;
2319 rtx mmin
, mmax
, mode_mmin
, mode_mmax
;
2320 uint64_t s
, size
, d
, inv
, max
, up
, down
;
2321 int64_t inc
, step_val
;
2322 int was_sharp
= false;
2326 /* The meaning of these assumptions is this:
2328 then the rest of information does not have to be valid
2329 if noloop_assumptions then the loop does not roll
2330 if infinite then this exit is never used */
2332 desc
->assumptions
= NULL_RTX
;
2333 desc
->noloop_assumptions
= NULL_RTX
;
2334 desc
->infinite
= NULL_RTX
;
2335 desc
->simple_p
= true;
2337 desc
->const_iter
= false;
2338 desc
->niter_expr
= NULL_RTX
;
2340 cond
= GET_CODE (condition
);
2341 gcc_assert (COMPARISON_P (condition
));
2343 nonvoid_mode
= GET_MODE (XEXP (condition
, 0));
2344 if (nonvoid_mode
== VOIDmode
)
2345 nonvoid_mode
= GET_MODE (XEXP (condition
, 1));
2346 /* The constant comparisons should be folded. */
2347 gcc_assert (nonvoid_mode
!= VOIDmode
);
2349 /* We only handle integers or pointers. */
2350 scalar_int_mode mode
;
2351 if (!is_a
<scalar_int_mode
> (nonvoid_mode
, &mode
))
2354 op0
= XEXP (condition
, 0);
2355 if (!iv_analyze (insn
, mode
, op0
, &iv0
))
2358 op1
= XEXP (condition
, 1);
2359 if (!iv_analyze (insn
, mode
, op1
, &iv1
))
2362 if (GET_MODE_BITSIZE (iv0
.extend_mode
) > HOST_BITS_PER_WIDE_INT
2363 || GET_MODE_BITSIZE (iv1
.extend_mode
) > HOST_BITS_PER_WIDE_INT
)
2366 /* Check condition and normalize it. */
2374 std::swap (iv0
, iv1
);
2375 cond
= swap_condition (cond
);
2387 /* Handle extends. This is relatively nontrivial, so we only try in some
2388 easy cases, when we can canonicalize the ivs (possibly by adding some
2389 assumptions) to shape subreg (base + i * step). This function also fills
2390 in desc->mode and desc->signed_p. */
2392 if (!canonicalize_iv_subregs (&iv0
, &iv1
, cond
, desc
))
2395 comp_mode
= iv0
.extend_mode
;
2397 size
= GET_MODE_PRECISION (mode
);
2398 get_mode_bounds (mode
, (cond
== LE
|| cond
== LT
), comp_mode
, &mmin
, &mmax
);
2399 mode_mmin
= lowpart_subreg (mode
, mmin
, comp_mode
);
2400 mode_mmax
= lowpart_subreg (mode
, mmax
, comp_mode
);
2402 if (!CONST_INT_P (iv0
.step
) || !CONST_INT_P (iv1
.step
))
2405 /* We can take care of the case of two induction variables chasing each other
2406 if the test is NE. I have never seen a loop using it, but still it is
2408 if (iv0
.step
!= const0_rtx
&& iv1
.step
!= const0_rtx
)
2413 iv0
.step
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.step
, iv1
.step
);
2414 iv1
.step
= const0_rtx
;
2417 iv0
.step
= lowpart_subreg (mode
, iv0
.step
, comp_mode
);
2418 iv1
.step
= lowpart_subreg (mode
, iv1
.step
, comp_mode
);
2420 /* This is either infinite loop or the one that ends immediately, depending
2421 on initial values. Unswitching should remove this kind of conditions. */
2422 if (iv0
.step
== const0_rtx
&& iv1
.step
== const0_rtx
)
2427 if (iv0
.step
== const0_rtx
)
2428 step_val
= -INTVAL (iv1
.step
);
2430 step_val
= INTVAL (iv0
.step
);
2432 /* Ignore loops of while (i-- < 10) type. */
2436 step_is_pow2
= !(step_val
& (step_val
- 1));
2440 /* We do not care about whether the step is power of two in this
2442 step_is_pow2
= false;
2446 /* Some more condition normalization. We must record some assumptions
2447 due to overflows. */
2452 /* We want to take care only of non-sharp relationals; this is easy,
2453 as in cases the overflow would make the transformation unsafe
2454 the loop does not roll. Seemingly it would make more sense to want
2455 to take care of sharp relationals instead, as NE is more similar to
2456 them, but the problem is that here the transformation would be more
2457 difficult due to possibly infinite loops. */
2458 if (iv0
.step
== const0_rtx
)
2460 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2461 assumption
= simplify_gen_relational (EQ
, SImode
, mode
, tmp
,
2463 if (assumption
== const_true_rtx
)
2464 goto zero_iter_simplify
;
2465 iv0
.base
= simplify_gen_binary (PLUS
, comp_mode
,
2466 iv0
.base
, const1_rtx
);
2470 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2471 assumption
= simplify_gen_relational (EQ
, SImode
, mode
, tmp
,
2473 if (assumption
== const_true_rtx
)
2474 goto zero_iter_simplify
;
2475 iv1
.base
= simplify_gen_binary (PLUS
, comp_mode
,
2476 iv1
.base
, constm1_rtx
);
2479 if (assumption
!= const0_rtx
)
2480 desc
->noloop_assumptions
=
2481 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);
2482 cond
= (cond
== LT
) ? LE
: LEU
;
2484 /* It will be useful to be able to tell the difference once more in
2485 LE -> NE reduction. */
2491 /* Take care of trivially infinite loops. */
2494 if (iv0
.step
== const0_rtx
)
2496 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2497 if (rtx_equal_p (tmp
, mode_mmin
))
2500 alloc_EXPR_LIST (0, const_true_rtx
, NULL_RTX
);
2501 /* Fill in the remaining fields somehow. */
2502 goto zero_iter_simplify
;
2507 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2508 if (rtx_equal_p (tmp
, mode_mmax
))
2511 alloc_EXPR_LIST (0, const_true_rtx
, NULL_RTX
);
2512 /* Fill in the remaining fields somehow. */
2513 goto zero_iter_simplify
;
2518 /* If we can we want to take care of NE conditions instead of size
2519 comparisons, as they are much more friendly (most importantly
2520 this takes care of special handling of loops with step 1). We can
2521 do it if we first check that upper bound is greater or equal to
2522 lower bound, their difference is constant c modulo step and that
2523 there is not an overflow. */
2526 if (iv0
.step
== const0_rtx
)
2527 step
= simplify_gen_unary (NEG
, comp_mode
, iv1
.step
, comp_mode
);
2530 step
= lowpart_subreg (mode
, step
, comp_mode
);
2531 delta
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, iv0
.base
);
2532 delta
= lowpart_subreg (mode
, delta
, comp_mode
);
2533 delta
= simplify_gen_binary (UMOD
, mode
, delta
, step
);
2534 may_xform
= const0_rtx
;
2535 may_not_xform
= const_true_rtx
;
2537 if (CONST_INT_P (delta
))
2539 if (was_sharp
&& INTVAL (delta
) == INTVAL (step
) - 1)
2541 /* A special case. We have transformed condition of type
2542 for (i = 0; i < 4; i += 4)
2544 for (i = 0; i <= 3; i += 4)
2545 obviously if the test for overflow during that transformation
2546 passed, we cannot overflow here. Most importantly any
2547 loop with sharp end condition and step 1 falls into this
2548 category, so handling this case specially is definitely
2549 worth the troubles. */
2550 may_xform
= const_true_rtx
;
2552 else if (iv0
.step
== const0_rtx
)
2554 bound
= simplify_gen_binary (PLUS
, comp_mode
, mmin
, step
);
2555 bound
= simplify_gen_binary (MINUS
, comp_mode
, bound
, delta
);
2556 bound
= lowpart_subreg (mode
, bound
, comp_mode
);
2557 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2558 may_xform
= simplify_gen_relational (cond
, SImode
, mode
,
2560 may_not_xform
= simplify_gen_relational (reverse_condition (cond
),
2566 bound
= simplify_gen_binary (MINUS
, comp_mode
, mmax
, step
);
2567 bound
= simplify_gen_binary (PLUS
, comp_mode
, bound
, delta
);
2568 bound
= lowpart_subreg (mode
, bound
, comp_mode
);
2569 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2570 may_xform
= simplify_gen_relational (cond
, SImode
, mode
,
2572 may_not_xform
= simplify_gen_relational (reverse_condition (cond
),
2578 if (may_xform
!= const0_rtx
)
2580 /* We perform the transformation always provided that it is not
2581 completely senseless. This is OK, as we would need this assumption
2582 to determine the number of iterations anyway. */
2583 if (may_xform
!= const_true_rtx
)
2585 /* If the step is a power of two and the final value we have
2586 computed overflows, the cycle is infinite. Otherwise it
2587 is nontrivial to compute the number of iterations. */
2589 desc
->infinite
= alloc_EXPR_LIST (0, may_not_xform
,
2592 desc
->assumptions
= alloc_EXPR_LIST (0, may_xform
,
2596 /* We are going to lose some information about upper bound on
2597 number of iterations in this step, so record the information
2599 inc
= INTVAL (iv0
.step
) - INTVAL (iv1
.step
);
2600 if (CONST_INT_P (iv1
.base
))
2601 up
= INTVAL (iv1
.base
);
2603 up
= INTVAL (mode_mmax
) - inc
;
2604 down
= INTVAL (CONST_INT_P (iv0
.base
)
2607 max
= (up
- down
) / inc
+ 1;
2609 && !desc
->assumptions
)
2610 record_niter_bound (loop
, max
, false, true);
2612 if (iv0
.step
== const0_rtx
)
2614 iv0
.base
= simplify_gen_binary (PLUS
, comp_mode
, iv0
.base
, delta
);
2615 iv0
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.base
, step
);
2619 iv1
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, delta
);
2620 iv1
.base
= simplify_gen_binary (PLUS
, comp_mode
, iv1
.base
, step
);
2623 tmp0
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2624 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2625 assumption
= simplify_gen_relational (reverse_condition (cond
),
2626 SImode
, mode
, tmp0
, tmp1
);
2627 if (assumption
== const_true_rtx
)
2628 goto zero_iter_simplify
;
2629 else if (assumption
!= const0_rtx
)
2630 desc
->noloop_assumptions
=
2631 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);
2636 /* Count the number of iterations. */
2639 /* Everything we do here is just arithmetics modulo size of mode. This
2640 makes us able to do more involved computations of number of iterations
2641 than in other cases. First transform the condition into shape
2642 s * i <> c, with s positive. */
2643 iv1
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, iv0
.base
);
2644 iv0
.base
= const0_rtx
;
2645 iv0
.step
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.step
, iv1
.step
);
2646 iv1
.step
= const0_rtx
;
2647 if (INTVAL (iv0
.step
) < 0)
2649 iv0
.step
= simplify_gen_unary (NEG
, comp_mode
, iv0
.step
, comp_mode
);
2650 iv1
.base
= simplify_gen_unary (NEG
, comp_mode
, iv1
.base
, comp_mode
);
2652 iv0
.step
= lowpart_subreg (mode
, iv0
.step
, comp_mode
);
2654 /* Let nsd (s, size of mode) = d. If d does not divide c, the loop
2655 is infinite. Otherwise, the number of iterations is
2656 (inverse(s/d) * (c/d)) mod (size of mode/d). */
2657 s
= INTVAL (iv0
.step
); d
= 1;
2664 bound
= GEN_INT (((uint64_t) 1 << (size
- 1 ) << 1) - 1);
2666 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2667 tmp
= simplify_gen_binary (UMOD
, mode
, tmp1
, gen_int_mode (d
, mode
));
2668 assumption
= simplify_gen_relational (NE
, SImode
, mode
, tmp
, const0_rtx
);
2669 desc
->infinite
= alloc_EXPR_LIST (0, assumption
, desc
->infinite
);
2671 tmp
= simplify_gen_binary (UDIV
, mode
, tmp1
, gen_int_mode (d
, mode
));
2672 inv
= inverse (s
, size
);
2673 tmp
= simplify_gen_binary (MULT
, mode
, tmp
, gen_int_mode (inv
, mode
));
2674 desc
->niter_expr
= simplify_gen_binary (AND
, mode
, tmp
, bound
);
2678 if (iv1
.step
== const0_rtx
)
2679 /* Condition in shape a + s * i <= b
2680 We must know that b + s does not overflow and a <= b + s and then we
2681 can compute number of iterations as (b + s - a) / s. (It might
2682 seem that we in fact could be more clever about testing the b + s
2683 overflow condition using some information about b - a mod s,
2684 but it was already taken into account during LE -> NE transform). */
2687 tmp0
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2688 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2690 bound
= simplify_gen_binary (MINUS
, mode
, mode_mmax
,
2691 lowpart_subreg (mode
, step
,
2697 /* If s is power of 2, we know that the loop is infinite if
2698 a % s <= b % s and b + s overflows. */
2699 assumption
= simplify_gen_relational (reverse_condition (cond
),
2703 t0
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp0
), step
);
2704 t1
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp1
), step
);
2705 tmp
= simplify_gen_relational (cond
, SImode
, mode
, t0
, t1
);
2706 assumption
= simplify_gen_binary (AND
, SImode
, assumption
, tmp
);
2708 alloc_EXPR_LIST (0, assumption
, desc
->infinite
);
2712 assumption
= simplify_gen_relational (cond
, SImode
, mode
,
2715 alloc_EXPR_LIST (0, assumption
, desc
->assumptions
);
2718 tmp
= simplify_gen_binary (PLUS
, comp_mode
, iv1
.base
, iv0
.step
);
2719 tmp
= lowpart_subreg (mode
, tmp
, comp_mode
);
2720 assumption
= simplify_gen_relational (reverse_condition (cond
),
2721 SImode
, mode
, tmp0
, tmp
);
2723 delta
= simplify_gen_binary (PLUS
, mode
, tmp1
, step
);
2724 delta
= simplify_gen_binary (MINUS
, mode
, delta
, tmp0
);
2728 /* Condition in shape a <= b - s * i
2729 We must know that a - s does not overflow and a - s <= b and then
2730 we can again compute number of iterations as (b - (a - s)) / s. */
2731 step
= simplify_gen_unary (NEG
, mode
, iv1
.step
, mode
);
2732 tmp0
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2733 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2735 bound
= simplify_gen_binary (PLUS
, mode
, mode_mmin
,
2736 lowpart_subreg (mode
, step
, comp_mode
));
2741 /* If s is power of 2, we know that the loop is infinite if
2742 a % s <= b % s and a - s overflows. */
2743 assumption
= simplify_gen_relational (reverse_condition (cond
),
2747 t0
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp0
), step
);
2748 t1
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp1
), step
);
2749 tmp
= simplify_gen_relational (cond
, SImode
, mode
, t0
, t1
);
2750 assumption
= simplify_gen_binary (AND
, SImode
, assumption
, tmp
);
2752 alloc_EXPR_LIST (0, assumption
, desc
->infinite
);
2756 assumption
= simplify_gen_relational (cond
, SImode
, mode
,
2759 alloc_EXPR_LIST (0, assumption
, desc
->assumptions
);
2762 tmp
= simplify_gen_binary (PLUS
, comp_mode
, iv0
.base
, iv1
.step
);
2763 tmp
= lowpart_subreg (mode
, tmp
, comp_mode
);
2764 assumption
= simplify_gen_relational (reverse_condition (cond
),
2767 delta
= simplify_gen_binary (MINUS
, mode
, tmp0
, step
);
2768 delta
= simplify_gen_binary (MINUS
, mode
, tmp1
, delta
);
2770 if (assumption
== const_true_rtx
)
2771 goto zero_iter_simplify
;
2772 else if (assumption
!= const0_rtx
)
2773 desc
->noloop_assumptions
=
2774 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);
2775 delta
= simplify_gen_binary (UDIV
, mode
, delta
, step
);
2776 desc
->niter_expr
= delta
;
2779 old_niter
= desc
->niter_expr
;
2781 simplify_using_initial_values (loop
, AND
, &desc
->assumptions
);
2782 if (desc
->assumptions
2783 && XEXP (desc
->assumptions
, 0) == const0_rtx
)
2785 simplify_using_initial_values (loop
, IOR
, &desc
->noloop_assumptions
);
2786 simplify_using_initial_values (loop
, IOR
, &desc
->infinite
);
2787 simplify_using_initial_values (loop
, UNKNOWN
, &desc
->niter_expr
);
2789 /* Rerun the simplification. Consider code (created by copying loop headers)
2801 The first pass determines that i = 0, the second pass uses it to eliminate
2802 noloop assumption. */
2804 simplify_using_initial_values (loop
, AND
, &desc
->assumptions
);
2805 if (desc
->assumptions
2806 && XEXP (desc
->assumptions
, 0) == const0_rtx
)
2808 simplify_using_initial_values (loop
, IOR
, &desc
->noloop_assumptions
);
2809 simplify_using_initial_values (loop
, IOR
, &desc
->infinite
);
2810 simplify_using_initial_values (loop
, UNKNOWN
, &desc
->niter_expr
);
2812 if (desc
->noloop_assumptions
2813 && XEXP (desc
->noloop_assumptions
, 0) == const_true_rtx
)
2816 if (CONST_INT_P (desc
->niter_expr
))
2818 uint64_t val
= INTVAL (desc
->niter_expr
);
2820 desc
->const_iter
= true;
2821 desc
->niter
= val
& GET_MODE_MASK (desc
->mode
);
2823 && !desc
->assumptions
)
2824 record_niter_bound (loop
, desc
->niter
, false, true);
2828 max
= determine_max_iter (loop
, desc
, old_niter
);
2830 goto zero_iter_simplify
;
2832 && !desc
->assumptions
)
2833 record_niter_bound (loop
, max
, false, true);
2835 /* simplify_using_initial_values does a copy propagation on the registers
2836 in the expression for the number of iterations. This prolongs life
2837 ranges of registers and increases register pressure, and usually
2838 brings no gain (and if it happens to do, the cse pass will take care
2839 of it anyway). So prevent this behavior, unless it enabled us to
2840 derive that the number of iterations is a constant. */
2841 desc
->niter_expr
= old_niter
;
2847 /* Simplify the assumptions. */
2848 simplify_using_initial_values (loop
, AND
, &desc
->assumptions
);
2849 if (desc
->assumptions
2850 && XEXP (desc
->assumptions
, 0) == const0_rtx
)
2852 simplify_using_initial_values (loop
, IOR
, &desc
->infinite
);
2856 desc
->const_iter
= true;
2858 record_niter_bound (loop
, 0, true, true);
2859 desc
->noloop_assumptions
= NULL_RTX
;
2860 desc
->niter_expr
= const0_rtx
;
2864 desc
->simple_p
= false;
2868 /* Checks whether E is a simple exit from LOOP and stores its description
2872 check_simple_exit (class loop
*loop
, edge e
, class niter_desc
*desc
)
2874 basic_block exit_bb
;
2880 desc
->simple_p
= false;
2882 /* It must belong directly to the loop. */
2883 if (exit_bb
->loop_father
!= loop
)
2886 /* It must be tested (at least) once during any iteration. */
2887 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, exit_bb
))
2890 /* It must end in a simple conditional jump. */
2891 if (!any_condjump_p (BB_END (exit_bb
)))
2894 ein
= EDGE_SUCC (exit_bb
, 0);
2896 ein
= EDGE_SUCC (exit_bb
, 1);
2899 desc
->in_edge
= ein
;
2901 /* Test whether the condition is suitable. */
2902 if (!(condition
= get_condition (BB_END (ein
->src
), &at
, false, false)))
2905 if (ein
->flags
& EDGE_FALLTHRU
)
2907 condition
= reversed_condition (condition
);
2912 /* Check that we are able to determine number of iterations and fill
2913 in information about it. */
2914 iv_number_of_iterations (loop
, at
, condition
, desc
);
2917 /* Finds a simple exit of LOOP and stores its description into DESC. */
/* NOTE(review): garbled extraction -- the embedded original line numbers
   (2920, 2925, 2929, ...) jump, so local declarations, braces, `continue'
   statements and some guards are elided from this view.  Comments only are
   added; visible tokens are untouched.  */
2920 find_simple_exit (class loop
*loop
, class niter_desc
*desc
)
/* ACT holds the description of the exit currently being examined; the best
   candidate is presumably copied into *DESC in an elided statement.  */
2925 class niter_desc act
;
2929 desc
->simple_p
= false;
/* Walk every basic block of the loop body ...  */
2930 body
= get_loop_body (loop
);
2932 for (i
= 0; i
< loop
->num_nodes
; i
++)
/* ... and every successor edge of each block.  */
2934 FOR_EACH_EDGE (e
, ei
, body
[i
]->succs
)
/* Edges whose destination is still inside the loop are not exits; the
   elided statement here is presumably a `continue' -- TODO confirm.  */
2936 if (flow_bb_inside_loop_p (loop
, e
->dest
))
2939 check_simple_exit (loop
, e
, &act
);
2947 /* Prefer constant iterations; the less the better. */
2949 || (desc
->const_iter
&& act
.niter
>= desc
->niter
))
2952 /* Also if the actual exit may be infinite, while the old one
2953 not, prefer the old one. */
2954 if (act
.infinite
&& !desc
->infinite
)
/* Dump-file reporting: describe the chosen simple exit and the simplified
   assumptions / iteration count.  (The `if (dump_file)' and `simple_p'
   guards are among the elided lines.)  */
2966 fprintf (dump_file
, "Loop %d is simple:\n", loop
->num
);
2967 fprintf (dump_file
, " simple exit %d -> %d\n",
2968 desc
->out_edge
->src
->index
,
2969 desc
->out_edge
->dest
->index
);
2970 if (desc
->assumptions
)
2972 fprintf (dump_file
, " assumptions: ");
2973 print_rtl (dump_file
, desc
->assumptions
);
2974 fprintf (dump_file
, "\n");
2976 if (desc
->noloop_assumptions
)
2978 fprintf (dump_file
, " does not roll if: ");
2979 print_rtl (dump_file
, desc
->noloop_assumptions
);
2980 fprintf (dump_file
, "\n");
2984 fprintf (dump_file
, " infinite if: ");
2985 print_rtl (dump_file
, desc
->infinite
);
2986 fprintf (dump_file
, "\n");
2989 fprintf (dump_file
, " number of iterations: ");
2990 print_rtl (dump_file
, desc
->niter_expr
);
2991 fprintf (dump_file
, "\n");
/* Report the iteration bounds recorded for the loop.  */
2993 fprintf (dump_file
, " upper bound: %li\n",
2994 (long)get_max_loop_iterations_int (loop
));
2995 fprintf (dump_file
, " likely upper bound: %li\n",
2996 (long)get_likely_max_loop_iterations_int (loop
));
2997 fprintf (dump_file
, " realistic bound: %li\n",
2998 (long)get_estimated_loop_iterations_int (loop
));
3001 fprintf (dump_file
, "Loop %d is not simple.\n", loop
->num
);
3004 /* Fix up the finiteness if possible. We can only do it for single exit,
3005 since the loop is finite, but it's possible that we predicate one loop
3006 exit to be finite which can not be determined as finite in middle-end as
3007 well. It results in incorrect predicate information on the exit condition
3008 expression. For example, if says [(int) _1 + -8, + , -8] != 0 finite,
3009 it means _1 can exactly divide -8. */
3010 if (desc
->infinite
&& single_exit (loop
) && finite_loop_p (loop
))
/* Middle-end proved the single-exit loop finite: drop the (overly
   conservative) infiniteness condition.  */
3012 desc
->infinite
= NULL_RTX
;
3014 fprintf (dump_file
, " infinite updated to finite.\n");
3020 /* Creates a simple loop description of LOOP if it was not computed
/* NOTE(review): garbled extraction -- the early-return path for an already
   cached description (and the final `return') is elided here, judging by
   the jump in the embedded line numbers (3026 -> 3031).  Comments only are
   added; visible tokens are untouched.  */
3024 get_simple_loop_desc (class loop
*loop
)
/* Fetch the cached description, if any was attached to LOOP before.  */
3026 class niter_desc
*desc
= simple_loop_desc (loop
);
3031 /* At least desc->infinite is not always initialized by
3032 find_simple_loop_exit. */
/* Allocate a zero-cleared, GC-managed description, (re)initialize the iv
   analysis for this loop, compute the exit description, and cache it on
   the loop.  */
3033 desc
= ggc_cleared_alloc
<niter_desc
> ();
3034 iv_analysis_loop_init (loop
);
3035 find_simple_exit (loop
, desc
);
3036 loop
->simple_loop_desc
= desc
;
3040 /* Releases simple loop description for LOOP. */
/* NOTE(review): garbled extraction -- the guard for a missing description
   and the statement actually freeing it are elided (embedded line numbers
   jump 3045 -> 3051).  Comments only are added; visible tokens are
   untouched.  */
3043 free_simple_loop_desc (class loop
*loop
)
/* Look up the cached description attached to LOOP.  */
3045 class niter_desc
*desc
= simple_loop_desc (loop
);
/* Detach the description from the loop so later queries see none.  */
3051 loop
->simple_loop_desc
= NULL
;