/* Rtl-level induction variable analysis.
   Copyright (C) 2004-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This is a simple analysis of induction variables of the loop.  The major use
   is for determining the number of iterations of a loop for loop unrolling,
   doloop optimization and branch prediction.  The iv information is computed
   on demand.

   Induction variables are analyzed by walking the use-def chains.  When
   a basic induction variable (biv) is found, it is cached in the bivs
   hash table.  When a register is proved to be a biv, its description
   is stored to DF_REF_DATA of the def reference.

   The analysis always works with one loop -- you must call
   iv_analysis_loop_init (loop) for it.  All the other functions then work with
   this loop.  When you need to work with another loop, just call
   iv_analysis_loop_init for it.  When you no longer need iv analysis, call
   iv_analysis_done () to clean up the memory.
   The available functions are:

   iv_analyze (insn, mode, reg, iv): Stores the description of the induction
     variable corresponding to the use of register REG in INSN to IV, given
     that REG has mode MODE.  Returns true if REG is an induction variable
     in INSN, false otherwise.  If a use of REG is not found in INSN,
     the following insns are scanned (so that we may call this function
     on insns returned by get_condition).
   iv_analyze_result (insn, def, iv):  Stores to IV the description of the iv
     corresponding to DEF, which is a register defined in INSN.
   iv_analyze_expr (insn, mode, expr, iv):  Stores to IV the description of iv
     corresponding to expression EXPR evaluated at INSN.  All registers used by
     EXPR must also be used in INSN.  MODE is the mode of EXPR.  */
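/* A minimal usage sketch of this interface (illustrative only; LOOP, INSN,
   REG and MODE stand for values the caller already has, e.g. from
   get_condition on the exit test of LOOP):

     iv_analysis_loop_init (loop);

     class rtx_iv iv;
     if (iv_analyze (insn, mode, reg, &iv))
       {
         ... use iv.base, iv.step, etc. ...
       }

     iv_analysis_done ();
*/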
#include "coretypes.h"
#include "diagnostic-core.h"
#include "tree-ssa-loop-niter.h"
#include "function-abi.h"
/* Possible return values of iv_get_reaching_def.  */

  /* More than one reaching def, or reaching def that does not
     dominate the use.  */

  /* The use is a trivial invariant of the loop, i.e. is not changed
     inside the loop.  */

  /* The use is reached by initial value and a value from the
     previous iteration.  */

  /* The use has single dominating def.  */
/* Information about a biv.  */

  unsigned regno;	/* The register of the biv.  */
  class rtx_iv iv;	/* Value of the biv.  */

static bool clean_slate = true;

static unsigned int iv_ref_table_size = 0;

/* Table of rtx_ivs indexed by the df_ref uid field.  */
static class rtx_iv **iv_ref_table;

/* Induction variable stored at the reference.  */
#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
#define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)

/* The current loop.  */
static class loop *current_loop;
/* Hashtable helper.  */

struct biv_entry_hasher : free_ptr_hash <biv_entry>
  typedef rtx_def *compare_type;
  static inline hashval_t hash (const biv_entry *);
  static inline bool equal (const biv_entry *, const rtx_def *);

/* Returns hash value for biv B.  */

biv_entry_hasher::hash (const biv_entry *b)

/* Compares biv B and register R.  */

biv_entry_hasher::equal (const biv_entry *b, const rtx_def *r)
  return b->regno == REGNO (r);

/* Bivs of the current loop.  */

static hash_table <biv_entry_hasher> *bivs;

static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);

/* Return the RTX code corresponding to the IV extend code EXTEND.  */
static inline enum rtx_code
iv_extend_to_rtx_code (enum iv_extend_code extend)
    case IV_UNKNOWN_EXTEND:
/* Dumps information about IV to FILE.  */

extern void dump_iv_info (FILE *, class rtx_iv *);
dump_iv_info (FILE *file, class rtx_iv *iv)
      fprintf (file, "not simple");

  if (iv->step == const0_rtx
      && !iv->first_special)
    fprintf (file, "invariant ");

  print_rtl (file, iv->base);
  if (iv->step != const0_rtx)
      fprintf (file, " + ");
      print_rtl (file, iv->step);
      fprintf (file, " * iteration");
  fprintf (file, " (in %s)", GET_MODE_NAME (iv->mode));

  if (iv->mode != iv->extend_mode)
    fprintf (file, " %s to %s",
	     rtx_name[iv_extend_to_rtx_code (iv->extend)],
	     GET_MODE_NAME (iv->extend_mode));

  if (iv->mult != const1_rtx)
      fprintf (file, " * ");
      print_rtl (file, iv->mult);
  if (iv->delta != const0_rtx)
      fprintf (file, " + ");
      print_rtl (file, iv->delta);
  if (iv->first_special)
    fprintf (file, " (first special)");
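/* As the dump above and get_iv_value below spell out, the value of an iv in
   iteration i is (roughly)

     delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step))

   where the extension/subreg step is skipped when extend == IV_UNKNOWN_EXTEND.  */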
check_iv_ref_table_size (void)
  if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ())
      unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
      iv_ref_table = XRESIZEVEC (class rtx_iv *, iv_ref_table, new_size);
      memset (&iv_ref_table[iv_ref_table_size], 0,
	      (new_size - iv_ref_table_size) * sizeof (class rtx_iv *));
      iv_ref_table_size = new_size;
/* Checks whether REG is a well-behaved register.  */

simple_reg_p (rtx reg)
  if (GET_CODE (reg) == SUBREG)
      if (!subreg_lowpart_p (reg))
      reg = SUBREG_REG (reg);

  if (HARD_REGISTER_NUM_P (r))

  if (GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
/* Clears the information about ivs stored in df.  */

  unsigned i, n_defs = DF_DEFS_TABLE_SIZE ();

  check_iv_ref_table_size ();
  for (i = 0; i < n_defs; i++)
      iv = iv_ref_table[i];
	  iv_ref_table[i] = NULL;
/* Prepare the data for an induction variable analysis of a LOOP.  */

iv_analysis_loop_init (class loop *loop)
  /* Clear the information from the analysis of the previous loop.  */
      df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
      bivs = new hash_table<biv_entry_hasher> (10);

  /* Get rid of the ud chains before processing the rescans.  Then add
     the problem back.  */
  df_remove_problem (df_chain);
  df_process_deferred_rescans ();
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_chain_add_problem (DF_UD_CHAIN);
  df_note_add_problem ();
  df_analyze_loop (loop);
    df_dump_region (dump_file);

  check_iv_ref_table_size ();
/* Finds the definition of REG that dominates loop latch and stores
   it to DEF.  Returns false if there is not a single definition
   dominating the latch.  If REG has no definition in loop, DEF
   is set to NULL and true is returned.  */

latch_dominating_def (rtx reg, df_ref *def)
  df_ref single_rd = NULL, adef;
  unsigned regno = REGNO (reg);
  class df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);

  for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
      if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef))
	  || !bitmap_bit_p (&bb_info->out, DF_REF_ID (adef)))

      /* More than one reaching definition.  */

      if (!just_once_each_iteration_p (current_loop, DF_REF_BB (adef)))
/* Gets definition of REG reaching its use in INSN and stores it to DEF.  */

static enum iv_grd_result
iv_get_reaching_def (rtx_insn *insn, rtx reg, df_ref *def)
  basic_block def_bb, use_bb;

  if (!simple_reg_p (reg))
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  use = df_find_use (insn, reg);
  gcc_assert (use != NULL);

  if (!DF_REF_CHAIN (use))
    return GRD_INVARIANT;

  /* More than one reaching def.  */
  if (DF_REF_CHAIN (use)->next)

  adef = DF_REF_CHAIN (use)->ref;

  /* We do not handle setting only part of the register.  */
  if (DF_REF_FLAGS (adef) & DF_REF_READ_WRITE)

  def_insn = DF_REF_INSN (adef);
  def_bb = DF_REF_BB (adef);
  use_bb = BLOCK_FOR_INSN (insn);

  if (use_bb == def_bb)
    dom_p = (DF_INSN_LUID (def_insn) < DF_INSN_LUID (insn));
    dom_p = dominated_by_p (CDI_DOMINATORS, use_bb, def_bb);

    return GRD_SINGLE_DOM;

  /* The definition does not dominate the use.  This is still OK if
     this may be a use of a biv, i.e. if the def_bb dominates loop
     latch.  */
  if (just_once_each_iteration_p (current_loop, def_bb))
    return GRD_MAYBE_BIV;
/* Sets IV to invariant CST in MODE.  Always returns true (just for
   consistency with other iv manipulation functions that may fail).  */

iv_constant (class rtx_iv *iv, scalar_int_mode mode, rtx cst)
  iv->step = const0_rtx;
  iv->first_special = false;
  iv->extend = IV_UNKNOWN_EXTEND;
  iv->extend_mode = iv->mode;
  iv->delta = const0_rtx;
  iv->mult = const1_rtx;
/* Evaluates application of subreg to MODE on IV.  */

iv_subreg (class rtx_iv *iv, scalar_int_mode mode)
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
      rtx val = get_iv_value (iv, const0_rtx);
      val = lowpart_subreg (mode, val,
			    iv->extend == IV_UNKNOWN_EXTEND
			    ? iv->mode : iv->extend_mode);

      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;

  if (iv->extend_mode == mode)

  if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (iv->mode))

  iv->extend = IV_UNKNOWN_EXTEND;
  iv->base = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
				  simplify_gen_binary (MULT, iv->extend_mode,
						       iv->base, iv->mult));
  iv->step = simplify_gen_binary (MULT, iv->extend_mode, iv->step, iv->mult);
  iv->mult = const1_rtx;
  iv->delta = const0_rtx;
  iv->first_special = false;
/* Evaluates application of EXTEND to MODE on IV.  */

iv_extend (class rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
      rtx val = get_iv_value (iv, const0_rtx);
      if (iv->extend_mode != iv->mode
	  && iv->extend != IV_UNKNOWN_EXTEND
	  && iv->extend != extend)
	val = lowpart_subreg (iv->mode, val, iv->extend_mode);
      val = simplify_gen_unary (iv_extend_to_rtx_code (extend), mode,
				? iv->extend_mode : iv->mode);

      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;

  if (mode != iv->extend_mode)

  if (iv->extend != IV_UNKNOWN_EXTEND
      && iv->extend != extend)
/* Evaluates negation of IV.  */

iv_neg (class rtx_iv *iv)
  if (iv->extend == IV_UNKNOWN_EXTEND)
      iv->base = simplify_gen_unary (NEG, iv->extend_mode,
				     iv->base, iv->extend_mode);
      iv->step = simplify_gen_unary (NEG, iv->extend_mode,
				     iv->step, iv->extend_mode);

      iv->delta = simplify_gen_unary (NEG, iv->extend_mode,
				      iv->delta, iv->extend_mode);
      iv->mult = simplify_gen_unary (NEG, iv->extend_mode,
				     iv->mult, iv->extend_mode);
/* Evaluates addition or subtraction (according to OP) of IV1 to IV0.  */

iv_add (class rtx_iv *iv0, class rtx_iv *iv1, enum rtx_code op)
  scalar_int_mode mode;

  /* Extend the constant to extend_mode of the other operand if necessary.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->mode == iv0->extend_mode
      && iv0->step == const0_rtx
      && GET_MODE_SIZE (iv0->extend_mode) < GET_MODE_SIZE (iv1->extend_mode))
      iv0->extend_mode = iv1->extend_mode;
      iv0->base = simplify_gen_unary (ZERO_EXTEND, iv0->extend_mode,
				      iv0->base, iv0->mode);
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->mode == iv1->extend_mode
      && iv1->step == const0_rtx
      && GET_MODE_SIZE (iv1->extend_mode) < GET_MODE_SIZE (iv0->extend_mode))
      iv1->extend_mode = iv0->extend_mode;
      iv1->base = simplify_gen_unary (ZERO_EXTEND, iv1->extend_mode,
				      iv1->base, iv1->mode);

  mode = iv0->extend_mode;
  if (mode != iv1->extend_mode)

  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv1->extend == IV_UNKNOWN_EXTEND)
      if (iv0->mode != iv1->mode)

      iv0->base = simplify_gen_binary (op, mode, iv0->base, iv1->base);
      iv0->step = simplify_gen_binary (op, mode, iv0->step, iv1->step);

  /* Handle addition of constant.  */
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->step == const0_rtx)
      iv0->delta = simplify_gen_binary (op, mode, iv0->delta, iv1->base);

  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->step == const0_rtx)

  iv0->delta = simplify_gen_binary (PLUS, mode, iv0->delta, arg);
/* Evaluates multiplication of IV by constant CST.  */

iv_mult (class rtx_iv *iv, rtx mby)
  scalar_int_mode mode = iv->extend_mode;

  if (GET_MODE (mby) != VOIDmode
      && GET_MODE (mby) != mode)

  if (iv->extend == IV_UNKNOWN_EXTEND)
      iv->base = simplify_gen_binary (MULT, mode, iv->base, mby);
      iv->step = simplify_gen_binary (MULT, mode, iv->step, mby);

      iv->delta = simplify_gen_binary (MULT, mode, iv->delta, mby);
      iv->mult = simplify_gen_binary (MULT, mode, iv->mult, mby);
/* Evaluates shift of IV by constant CST.  */

iv_shift (class rtx_iv *iv, rtx mby)
  scalar_int_mode mode = iv->extend_mode;

  if (GET_MODE (mby) != VOIDmode
      && GET_MODE (mby) != mode)

  if (iv->extend == IV_UNKNOWN_EXTEND)
      iv->base = simplify_gen_binary (ASHIFT, mode, iv->base, mby);
      iv->step = simplify_gen_binary (ASHIFT, mode, iv->step, mby);

      iv->delta = simplify_gen_binary (ASHIFT, mode, iv->delta, mby);
      iv->mult = simplify_gen_binary (ASHIFT, mode, iv->mult, mby);
/* The recursive part of get_biv_step.  Gets the value of the single value
   defined by DEF with respect to the initial value of REG inside the loop,
   in the shape described at get_biv_step.  */

get_biv_step_1 (df_ref def, scalar_int_mode outer_mode, rtx reg,
		rtx *inner_step, scalar_int_mode *inner_mode,
		enum iv_extend_code *extend,
  rtx set, rhs, op0 = NULL_RTX, op1 = NULL_RTX;
  rtx_insn *insn = DF_REF_INSN (def);
  enum iv_grd_result res;

  set = single_set (insn);
  rhs = find_reg_equal_equiv_note (insn);

  code = GET_CODE (rhs);
      if (code == PLUS && CONSTANT_P (op0))
	std::swap (op0, op1);

      if (!simple_reg_p (op0)
	  || !CONSTANT_P (op1))

      if (GET_MODE (rhs) != outer_mode)
	  /* ppc64 uses expressions like

	     (set x:SI (plus:SI (subreg:SI y:DI) 1)).

	     this is equivalent to

	     (set x':DI (plus:DI y:DI 1))
	     (set x:SI (subreg:SI x':DI)).  */
	  if (GET_CODE (op0) != SUBREG)
	  if (GET_MODE (SUBREG_REG (op0)) != outer_mode)

      if (GET_MODE (rhs) != outer_mode)
      if (!simple_reg_p (op0))

  if (GET_CODE (next) == SUBREG)
      if (!subreg_lowpart_p (next))
      nextr = SUBREG_REG (next);
      if (GET_MODE (nextr) != outer_mode)

  res = iv_get_reaching_def (insn, nextr, &next_def);

  if (res == GRD_INVALID || res == GRD_INVARIANT)

  if (res == GRD_MAYBE_BIV)
      if (!rtx_equal_p (nextr, reg))

      *inner_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;
      *inner_mode = outer_mode;
      *outer_step = const0_rtx;
  else if (!get_biv_step_1 (next_def, outer_mode, reg,
			    inner_step, inner_mode, extend,

  if (GET_CODE (next) == SUBREG)
      scalar_int_mode amode;
      if (!is_a <scalar_int_mode> (GET_MODE (next), &amode)
	  || GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))

      *inner_step = simplify_gen_binary (PLUS, outer_mode,
					 *inner_step, *outer_step);
      *outer_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;

      if (*inner_mode == outer_mode
	  /* See comment in previous switch.  */
	  || GET_MODE (rhs) != outer_mode)
	*inner_step = simplify_gen_binary (code, outer_mode,
      *outer_step = simplify_gen_binary (code, outer_mode,

      gcc_assert (GET_MODE (op0) == *inner_mode
		  && *extend == IV_UNKNOWN_EXTEND
		  && *outer_step == const0_rtx);

      *extend = (code == SIGN_EXTEND) ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
/* Gets the operation on register REG inside loop, in shape

   OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP))

   If the operation cannot be described in this shape, return false.
   LAST_DEF is the definition of REG that dominates loop latch.  */

get_biv_step (df_ref last_def, scalar_int_mode outer_mode, rtx reg,
	      rtx *inner_step, scalar_int_mode *inner_mode,
	      enum iv_extend_code *extend, rtx *outer_step)
  if (!get_biv_step_1 (last_def, outer_mode, reg,
		       inner_step, inner_mode, extend,

  gcc_assert ((*inner_mode == outer_mode) != (*extend != IV_UNKNOWN_EXTEND));
  gcc_assert (*inner_mode != outer_mode || *outer_step == const0_rtx);
/* Records information that DEF is induction variable IV.  */

record_iv (df_ref def, class rtx_iv *iv)
  class rtx_iv *recorded_iv = XNEW (class rtx_iv);

  check_iv_ref_table_size ();
  DF_REF_IV_SET (def, recorded_iv);

/* If DEF was already analyzed for bivness, store the description of the biv to
   IV and return true.  Otherwise return false.  */

analyzed_for_bivness_p (rtx def, class rtx_iv *iv)
  class biv_entry *biv = bivs->find_with_hash (def, REGNO (def));

record_biv (rtx def, class rtx_iv *iv)
  class biv_entry *biv = XNEW (class biv_entry);
  biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def), INSERT);

  biv->regno = REGNO (def);
/* Determines whether DEF is a biv and if so, stores its description
   to *IV.  OUTER_MODE is the mode of DEF.  */

iv_analyze_biv (scalar_int_mode outer_mode, rtx def, class rtx_iv *iv)
  rtx inner_step, outer_step;
  scalar_int_mode inner_mode;
  enum iv_extend_code extend;

      fprintf (dump_file, "Analyzing ");
      print_rtl (dump_file, def);
      fprintf (dump_file, " for bivness.\n");

  if (!CONSTANT_P (def))
      return iv_constant (iv, outer_mode, def);

  if (!latch_dominating_def (def, &last_def))
	fprintf (dump_file, "  not simple.\n");

      return iv_constant (iv, outer_mode, def);

  if (analyzed_for_bivness_p (def, iv))
	fprintf (dump_file, "  already analysed.\n");
      return iv->base != NULL_RTX;

  if (!get_biv_step (last_def, outer_mode, def, &inner_step, &inner_mode,
		     &extend, &outer_step))

  /* Loop transforms base to es (base + inner_step) + outer_step,
     where es means extend of subreg between inner_mode and outer_mode.
     The corresponding induction variable is

     es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step  */
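  /* For instance, with inner_step == 1 and outer_step == 2 (each iteration
     computes REG = es (REG + 1) + 2), the value in iteration i is
     es ((base - 2) + i * 3) + 2, matching the shape above.  */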
  iv->base = simplify_gen_binary (MINUS, outer_mode, def, outer_step);
  iv->step = simplify_gen_binary (PLUS, outer_mode, inner_step, outer_step);
  iv->mode = inner_mode;
  iv->extend_mode = outer_mode;
  iv->mult = const1_rtx;
  iv->delta = outer_step;
  iv->first_special = inner_mode != outer_mode;

      fprintf (dump_file, "  ");
      dump_iv_info (dump_file, iv);
      fprintf (dump_file, "\n");

  record_biv (def, iv);
  return iv->base != NULL_RTX;
/* Analyzes expression RHS used at INSN and stores the result to *IV.
   The mode of the induction variable is MODE.  */

iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
  rtx op0 = NULL_RTX, op1 = NULL_RTX;
  class rtx_iv iv0, iv1;
  enum rtx_code code = GET_CODE (rhs);
  scalar_int_mode omode = mode;

  gcc_assert (GET_MODE (rhs) == mode || GET_MODE (rhs) == VOIDmode);

    return iv_analyze_op (insn, mode, rhs, iv);

      /* We don't know how many bits there are in a sign-extended constant.  */
      if (!is_a <scalar_int_mode> (GET_MODE (op0), &omode))

      if (!CONSTANT_P (mby))
	std::swap (op0, mby);
      if (!CONSTANT_P (mby))

      if (!CONSTANT_P (mby))

      && !iv_analyze_expr (insn, omode, op0, &iv0))
      && !iv_analyze_expr (insn, omode, op1, &iv1))

      if (!iv_extend (&iv0, IV_SIGN_EXTEND, mode))

      if (!iv_extend (&iv0, IV_ZERO_EXTEND, mode))

      if (!iv_add (&iv0, &iv1, code))

      if (!iv_mult (&iv0, mby))

      if (!iv_shift (&iv0, mby))

  return iv->base != NULL_RTX;
/* Analyzes iv DEF and stores the result to *IV.  */

iv_analyze_def (df_ref def, class rtx_iv *iv)
  rtx_insn *insn = DF_REF_INSN (def);
  rtx reg = DF_REF_REG (def);

      fprintf (dump_file, "Analyzing def of ");
      print_rtl (dump_file, reg);
      fprintf (dump_file, " in insn ");
      print_rtl_single (dump_file, insn);

  check_iv_ref_table_size ();
  if (DF_REF_IV (def))
	fprintf (dump_file, "  already analysed.\n");
      *iv = *DF_REF_IV (def);
      return iv->base != NULL_RTX;

  iv->base = NULL_RTX;
  iv->step = NULL_RTX;

  scalar_int_mode mode;
  if (!REG_P (reg) || !is_a <scalar_int_mode> (GET_MODE (reg), &mode))

  set = single_set (insn);
  if (!REG_P (SET_DEST (set)))

  gcc_assert (SET_DEST (set) == reg);
  rhs = find_reg_equal_equiv_note (insn);
    rhs = XEXP (rhs, 0);
    rhs = SET_SRC (set);

  iv_analyze_expr (insn, mode, rhs, iv);
  record_iv (def, iv);

      print_rtl (dump_file, reg);
      fprintf (dump_file, " in insn ");
      print_rtl_single (dump_file, insn);
      fprintf (dump_file, " is ");
      dump_iv_info (dump_file, iv);
      fprintf (dump_file, "\n");

  return iv->base != NULL_RTX;
/* Analyzes operand OP of INSN and stores the result to *IV.  MODE is the
   mode of OP.  */

iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, class rtx_iv *iv)
  enum iv_grd_result res;

      fprintf (dump_file, "Analyzing operand ");
      print_rtl (dump_file, op);
      fprintf (dump_file, " of insn ");
      print_rtl_single (dump_file, insn);

  if (function_invariant_p (op))
    res = GRD_INVARIANT;
  else if (GET_CODE (op) == SUBREG)
      scalar_int_mode inner_mode;
      if (!subreg_lowpart_p (op)
	  || !is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &inner_mode))
      if (!iv_analyze_op (insn, inner_mode, SUBREG_REG (op), iv))
      return iv_subreg (iv, mode);

      res = iv_get_reaching_def (insn, op, &def);
      if (res == GRD_INVALID)
	    fprintf (dump_file, "  not simple.\n");

  if (res == GRD_INVARIANT)
      iv_constant (iv, mode, op);
	  fprintf (dump_file, "  ");
	  dump_iv_info (dump_file, iv);
	  fprintf (dump_file, "\n");

  if (res == GRD_MAYBE_BIV)
    return iv_analyze_biv (mode, op, iv);

  return iv_analyze_def (def, iv);
/* Analyzes value VAL at INSN and stores the result to *IV.  MODE is the
   mode of VAL.  */

iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, class rtx_iv *iv)
  /* We must find the insn in which VAL is used, so that we get to UD chains.
     Since the function is sometimes called on the result of get_condition,
     this does not necessarily have to be directly INSN; scan also the
     following insns.  */
  if (simple_reg_p (val))
      if (GET_CODE (val) == SUBREG)
	reg = SUBREG_REG (val);

      while (!df_find_use (insn, reg))
	insn = NEXT_INSN (insn);

  return iv_analyze_op (insn, mode, val, iv);

/* Analyzes definition of DEF in INSN and stores the result to IV.  */

iv_analyze_result (rtx_insn *insn, rtx def, class rtx_iv *iv)
  adef = df_find_def (insn, def);

  return iv_analyze_def (adef, iv);
/* Checks whether definition of register REG in INSN is a basic induction
   variable.  MODE is the mode of REG.

   IV analysis must have been initialized (via a call to
   iv_analysis_loop_init) for this function to produce a result.  */

biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg)
  df_ref def, last_def;

  if (!simple_reg_p (reg))

  def = df_find_def (insn, reg);
  gcc_assert (def != NULL);
  if (!latch_dominating_def (reg, &last_def))
  if (last_def != def)

  if (!iv_analyze_biv (mode, reg, &iv))

  return iv.step != const0_rtx;
/* Calculates value of IV at ITERATION-th iteration.  */

get_iv_value (class rtx_iv *iv, rtx iteration)
  /* We would need to generate some if_then_else patterns, and so far
     it is not needed anywhere.  */
  gcc_assert (!iv->first_special);

  if (iv->step != const0_rtx && iteration != const0_rtx)
    val = simplify_gen_binary (PLUS, iv->extend_mode, iv->base,
			       simplify_gen_binary (MULT, iv->extend_mode,
						    iv->step, iteration));

  if (iv->extend_mode == iv->mode)

  val = lowpart_subreg (iv->mode, val, iv->extend_mode);

  if (iv->extend == IV_UNKNOWN_EXTEND)

  val = simplify_gen_unary (iv_extend_to_rtx_code (iv->extend),
			    iv->extend_mode, val, iv->mode);
  val = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
			     simplify_gen_binary (MULT, iv->extend_mode,
/* Free the data for an induction variable analysis.  */

iv_analysis_done (void)
  df_finish_pass (true);
  free (iv_ref_table);
  iv_ref_table = NULL;
  iv_ref_table_size = 0;

/* Computes inverse to X modulo (1 << MOD).  */

inverse (uint64_t x, int mod)
	  ((uint64_t) 1 << (mod - 1) << 1) - 1;

  for (i = 0; i < mod - 1; i++)
      rslt = (rslt * x) & mask;
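/* A note on this helper: an inverse modulo 1 << MOD exists only for odd X
   (the units modulo a power of two), which is what the number-of-iterations
   code below passes in (per its formula the argument is s/d, with all
   factors of two of the step moved into d).  For example, the inverse of 3
   modulo 16 is 11, since 3 * 11 == 33 == 2 * 16 + 1.  */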
/* Checks whether any register in X is in set ALT.  */

altered_reg_used (const_rtx x, bitmap alt)
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
      const_rtx x = *iter;
      if (REG_P (x) && REGNO_REG_SET_P (alt, REGNO (x)))
/* Marks registers altered by EXPR in set ALT.  */

mark_altered (rtx expr, const_rtx by ATTRIBUTE_UNUSED, void *alt)
  if (GET_CODE (expr) == SUBREG)
    expr = SUBREG_REG (expr);

  SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr));
/* Checks whether RHS is simple enough to process.  */

simple_rhs_p (rtx rhs)
  if (function_invariant_p (rhs)
      || (REG_P (rhs) && !HARD_REGISTER_P (rhs)))

  switch (GET_CODE (rhs))
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);
      /* Allow reg OP const and reg OP reg.  */
      if (!(REG_P (op0) && !HARD_REGISTER_P (op0))
	  && !function_invariant_p (op0))
      if (!(REG_P (op1) && !HARD_REGISTER_P (op1))
	  && !function_invariant_p (op1))

      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);
      /* Allow reg OP const.  */
      if (!(REG_P (op0) && !HARD_REGISTER_P (op0)))
      if (!function_invariant_p (op1))
/* If REGNO has a single definition, return its known value, otherwise return
   null.  */

find_single_def_src (unsigned int regno)
  /* Don't look through an unbounded number of single-definition REG copies;
     there might be loops for sources with uninitialized variables.  */
  for (int cnt = 0; cnt < 128; cnt++)
      df_ref adef = DF_REG_DEF_CHAIN (regno);
      if (adef == NULL || DF_REF_NEXT_REG (adef) != NULL
	  || DF_REF_IS_ARTIFICIAL (adef))

      rtx set = single_set (DF_REF_INSN (adef));
      if (set == NULL || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno)

      rtx note = find_reg_equal_equiv_note (DF_REF_INSN (adef));
      if (note && function_invariant_p (XEXP (note, 0)))
	  src = XEXP (note, 0);
	src = SET_SRC (set);

	  regno = REGNO (src);

  if (!function_invariant_p (src))
/* For any registers in *EXPR that have a single definition, try to replace
   them with the known-equivalent values.  */

replace_single_def_regs (rtx *expr)
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *expr, NONCONST)
      if (rtx new_x = find_single_def_src (REGNO (x)))
	  *expr = simplify_replace_rtx (*expr, x, new_x);
/* A subroutine of simplify_using_initial_values, this function examines INSN
   to see if it contains a suitable set that we can use to make a replacement.
   If it is suitable, return true and set DEST and SRC to the lhs and rhs of
   the set; return false otherwise.  */

suitable_set_for_replacement (rtx_insn *insn, rtx *dest, rtx *src)
  rtx set = single_set (insn);
  rtx lhs = NULL_RTX, rhs;

  lhs = SET_DEST (set);

  rhs = find_reg_equal_equiv_note (insn);
    rhs = XEXP (rhs, 0);
    rhs = SET_SRC (set);

  if (!simple_rhs_p (rhs))

/* Using the data returned by suitable_set_for_replacement, replace DEST
   with SRC in *EXPR and return the new expression.  Also call
   replace_single_def_regs if the replacement changed something.  */

replace_in_expr (rtx *expr, rtx dest, rtx src)
  *expr = simplify_replace_rtx (*expr, dest, src);
  replace_single_def_regs (expr);
1489 /* Checks whether A implies B. */
1492 implies_p (rtx a
, rtx b
)
1494 rtx op0
, op1
, opb0
, opb1
;
1497 if (rtx_equal_p (a
, b
))
1500 if (GET_CODE (a
) == EQ
)
1506 || (GET_CODE (op0
) == SUBREG
1507 && REG_P (SUBREG_REG (op0
))))
1509 rtx r
= simplify_replace_rtx (b
, op0
, op1
);
1510 if (r
== const_true_rtx
)
1515 || (GET_CODE (op1
) == SUBREG
1516 && REG_P (SUBREG_REG (op1
))))
1518 rtx r
= simplify_replace_rtx (b
, op1
, op0
);
1519 if (r
== const_true_rtx
)
1524 if (b
== const_true_rtx
)
1527 if ((GET_RTX_CLASS (GET_CODE (a
)) != RTX_COMM_COMPARE
1528 && GET_RTX_CLASS (GET_CODE (a
)) != RTX_COMPARE
)
1529 || (GET_RTX_CLASS (GET_CODE (b
)) != RTX_COMM_COMPARE
1530 && GET_RTX_CLASS (GET_CODE (b
)) != RTX_COMPARE
))
1538 mode
= GET_MODE (op0
);
1539 if (mode
!= GET_MODE (opb0
))
1541 else if (mode
== VOIDmode
)
1543 mode
= GET_MODE (op1
);
1544 if (mode
!= GET_MODE (opb1
))
1548 /* A < B implies A + 1 <= B. */
1549 if ((GET_CODE (a
) == GT
|| GET_CODE (a
) == LT
)
1550 && (GET_CODE (b
) == GE
|| GET_CODE (b
) == LE
))
1553 if (GET_CODE (a
) == GT
)
1554 std::swap (op0
, op1
);
1556 if (GET_CODE (b
) == GE
)
1557 std::swap (opb0
, opb1
);
1559 if (SCALAR_INT_MODE_P (mode
)
1560 && rtx_equal_p (op1
, opb1
)
1561 && simplify_gen_binary (MINUS
, mode
, opb0
, op0
) == const1_rtx
)
1566 /* A < B or A > B imply A != B. TODO: Likewise
1567 A + n < B implies A != B + n if neither wraps. */
1568 if (GET_CODE (b
) == NE
1569 && (GET_CODE (a
) == GT
|| GET_CODE (a
) == GTU
1570 || GET_CODE (a
) == LT
|| GET_CODE (a
) == LTU
))
1572 if (rtx_equal_p (op0
, opb0
)
1573 && rtx_equal_p (op1
, opb1
))
1577 /* For unsigned comparisons, A != 0 implies A > 0 and A >= 1. */
1578 if (GET_CODE (a
) == NE
1579 && op1
== const0_rtx
)
1581 if ((GET_CODE (b
) == GTU
1582 && opb1
== const0_rtx
)
1583 || (GET_CODE (b
) == GEU
1584 && opb1
== const1_rtx
))
1585 return rtx_equal_p (op0
, opb0
);
1588 /* A != N is equivalent to A - (N + 1) <u -1. */
1589 if (GET_CODE (a
) == NE
1590 && CONST_INT_P (op1
)
1591 && GET_CODE (b
) == LTU
1592 && opb1
== constm1_rtx
1593 && GET_CODE (opb0
) == PLUS
1594 && CONST_INT_P (XEXP (opb0
, 1))
1595 /* Avoid overflows. */
1596 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1597 != ((unsigned HOST_WIDE_INT
)1
1598 << (HOST_BITS_PER_WIDE_INT
- 1)) - 1)
1599 && INTVAL (XEXP (opb0
, 1)) + 1 == -INTVAL (op1
))
1600 return rtx_equal_p (op0
, XEXP (opb0
, 0));
1602 /* Likewise, A != N implies A - N > 0. */
1603 if (GET_CODE (a
) == NE
1604 && CONST_INT_P (op1
))
1606 if (GET_CODE (b
) == GTU
1607 && GET_CODE (opb0
) == PLUS
1608 && opb1
== const0_rtx
1609 && CONST_INT_P (XEXP (opb0
, 1))
1610 /* Avoid overflows. */
1611 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1612 != (HOST_WIDE_INT_1U
<< (HOST_BITS_PER_WIDE_INT
- 1)))
1613 && rtx_equal_p (XEXP (opb0
, 0), op0
))
1614 return INTVAL (op1
) == -INTVAL (XEXP (opb0
, 1));
1615 if (GET_CODE (b
) == GEU
1616 && GET_CODE (opb0
) == PLUS
1617 && opb1
== const1_rtx
1618 && CONST_INT_P (XEXP (opb0
, 1))
1619 /* Avoid overflows. */
1620 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1621 != (HOST_WIDE_INT_1U
<< (HOST_BITS_PER_WIDE_INT
- 1)))
1622 && rtx_equal_p (XEXP (opb0
, 0), op0
))
1623 return INTVAL (op1
) == -INTVAL (XEXP (opb0
, 1));
1626 /* A >s X, where X is positive, implies A <u Y, if Y is negative. */
1627 if ((GET_CODE (a
) == GT
|| GET_CODE (a
) == GE
)
1628 && CONST_INT_P (op1
)
1629 && ((GET_CODE (a
) == GT
&& op1
== constm1_rtx
)
1630 || INTVAL (op1
) >= 0)
1631 && GET_CODE (b
) == LTU
1632 && CONST_INT_P (opb1
)
1633 && rtx_equal_p (op0
, opb0
))
1634 return INTVAL (opb1
) < 0;
/* Canonicalizes COND so that

   (1) Operands are ordered according to
       swap_commutative_operands_p.
   (2) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.  */
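/* For instance, under rule (2) a condition (le x 41) becomes (lt x 42), and
   (geu x 7) becomes (gtu x 6).  */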
1647 canon_condition (rtx cond
)
1653 code
= GET_CODE (cond
);
1654 op0
= XEXP (cond
, 0);
1655 op1
= XEXP (cond
, 1);
1657 if (swap_commutative_operands_p (op0
, op1
))
1659 code
= swap_condition (code
);
1660 std::swap (op0
, op1
);
1663 mode
= GET_MODE (op0
);
1664 if (mode
== VOIDmode
)
1665 mode
= GET_MODE (op1
);
1666 gcc_assert (mode
!= VOIDmode
);
1668 if (CONST_SCALAR_INT_P (op1
) && GET_MODE_CLASS (mode
) != MODE_CC
)
1670 rtx_mode_t
const_val (op1
, mode
);
1675 if (wi::ne_p (const_val
, wi::max_value (mode
, SIGNED
)))
1678 op1
= immed_wide_int_const (wi::add (const_val
, 1), mode
);
1683 if (wi::ne_p (const_val
, wi::min_value (mode
, SIGNED
)))
1686 op1
= immed_wide_int_const (wi::sub (const_val
, 1), mode
);
1691 if (wi::ne_p (const_val
, -1))
1694 op1
= immed_wide_int_const (wi::add (const_val
, 1), mode
);
1699 if (wi::ne_p (const_val
, 0))
1702 op1
= immed_wide_int_const (wi::sub (const_val
, 1), mode
);
1711 if (op0
!= XEXP (cond
, 0)
1712 || op1
!= XEXP (cond
, 1)
1713 || code
!= GET_CODE (cond
)
1714 || GET_MODE (cond
) != SImode
)
1715 cond
= gen_rtx_fmt_ee (code
, SImode
, op0
, op1
);
1720 /* Reverses CONDition; returns NULL if we cannot. */
1723 reversed_condition (rtx cond
)
1725 enum rtx_code reversed
;
1726 reversed
= reversed_comparison_code (cond
, NULL
);
1727 if (reversed
== UNKNOWN
)
1730 return gen_rtx_fmt_ee (reversed
,
1731 GET_MODE (cond
), XEXP (cond
, 0),
1735 /* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the
1736 set of altered regs. */
1739 simplify_using_condition (rtx cond
, rtx
*expr
, regset altered
)
1741 rtx rev
, reve
, exp
= *expr
;
1743 /* If some register gets altered later, we do not really speak about its
1744 value at the time of comparison. */
1745 if (altered
&& altered_reg_used (cond
, altered
))
1748 if (GET_CODE (cond
) == EQ
1749 && REG_P (XEXP (cond
, 0)) && CONSTANT_P (XEXP (cond
, 1)))
1751 *expr
= simplify_replace_rtx (*expr
, XEXP (cond
, 0), XEXP (cond
, 1));
1755 if (!COMPARISON_P (exp
))
1758 rev
= reversed_condition (cond
);
1759 reve
= reversed_condition (exp
);
1761 cond
= canon_condition (cond
);
1762 exp
= canon_condition (exp
);
1764 rev
= canon_condition (rev
);
1766 reve
= canon_condition (reve
);
1768 if (rtx_equal_p (exp
, cond
))
1770 *expr
= const_true_rtx
;
1774 if (rev
&& rtx_equal_p (exp
, rev
))
1780 if (implies_p (cond
, exp
))
1782 *expr
= const_true_rtx
;
1786 if (reve
&& implies_p (cond
, reve
))
  /* A proof by contradiction.  If *EXPR implies (not cond), *EXPR must
     be false.  */
1794 if (rev
&& implies_p (exp
, rev
))
  /* Similarly, if (not *EXPR) implies (not cond), *EXPR must be true.  */
1801 if (rev
&& reve
&& implies_p (reve
, rev
))
1803 *expr
= const_true_rtx
;
1807 /* We would like to have some other tests here. TODO. */
1812 /* Use relationship between A and *B to eventually eliminate *B.
1813 OP is the operation we consider. */
1816 eliminate_implied_condition (enum rtx_code op
, rtx a
, rtx
*b
)
1821 /* If A implies *B, we may replace *B by true. */
1822 if (implies_p (a
, *b
))
1823 *b
= const_true_rtx
;
1827 /* If *B implies A, we may replace *B by false. */
1828 if (implies_p (*b
, a
))
1837 /* Eliminates the conditions in TAIL that are implied by HEAD. OP is the
1838 operation we consider. */
1841 eliminate_implied_conditions (enum rtx_code op
, rtx
*head
, rtx tail
)
1845 for (elt
= tail
; elt
; elt
= XEXP (elt
, 1))
1846 eliminate_implied_condition (op
, *head
, &XEXP (elt
, 0));
1847 for (elt
= tail
; elt
; elt
= XEXP (elt
, 1))
1848 eliminate_implied_condition (op
, XEXP (elt
, 0), head
);
1851 /* Simplifies *EXPR using initial values at the start of the LOOP. If *EXPR
1852 is a list, its elements are assumed to be combined using OP. */
1855 simplify_using_initial_values (class loop
*loop
, enum rtx_code op
, rtx
*expr
)
1857 bool expression_valid
;
1858 rtx head
, tail
, last_valid_expr
;
1859 rtx_expr_list
*cond_list
;
1862 regset altered
, this_altered
;
1868 if (CONSTANT_P (*expr
))
1871 if (GET_CODE (*expr
) == EXPR_LIST
)
1873 head
= XEXP (*expr
, 0);
1874 tail
= XEXP (*expr
, 1);
1876 eliminate_implied_conditions (op
, &head
, tail
);
1881 neutral
= const_true_rtx
;
1886 neutral
= const0_rtx
;
1887 aggr
= const_true_rtx
;
1894 simplify_using_initial_values (loop
, UNKNOWN
, &head
);
1897 XEXP (*expr
, 0) = aggr
;
1898 XEXP (*expr
, 1) = NULL_RTX
;
1901 else if (head
== neutral
)
1904 simplify_using_initial_values (loop
, op
, expr
);
1907 simplify_using_initial_values (loop
, op
, &tail
);
1909 if (tail
&& XEXP (tail
, 0) == aggr
)
1915 XEXP (*expr
, 0) = head
;
1916 XEXP (*expr
, 1) = tail
;
1920 gcc_assert (op
== UNKNOWN
);
1922 replace_single_def_regs (expr
);
1923 if (CONSTANT_P (*expr
))
1926 e
= loop_preheader_edge (loop
);
1927 if (e
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
1930 altered
= ALLOC_REG_SET (®_obstack
);
1931 this_altered
= ALLOC_REG_SET (®_obstack
);
1933 expression_valid
= true;
1934 last_valid_expr
= *expr
;
1938 insn
= BB_END (e
->src
);
1939 if (any_condjump_p (insn
))
1941 rtx cond
= get_condition (BB_END (e
->src
), NULL
, false, true);
1943 if (cond
&& (e
->flags
& EDGE_FALLTHRU
))
1944 cond
= reversed_condition (cond
);
1948 simplify_using_condition (cond
, expr
, altered
);
1952 if (CONSTANT_P (*expr
))
1954 for (note
= cond_list
; note
; note
= XEXP (note
, 1))
1956 simplify_using_condition (XEXP (note
, 0), expr
, altered
);
1957 if (CONSTANT_P (*expr
))
1961 cond_list
= alloc_EXPR_LIST (0, cond
, cond_list
);
1965 FOR_BB_INSNS_REVERSE (e
->src
, insn
)
1973 CLEAR_REG_SET (this_altered
);
1974 note_stores (insn
, mark_altered
, this_altered
);
1977 /* Kill all registers that might be clobbered by the call.
1978 We don't track modes of hard registers, so we need to be
1979 conservative and assume that partial kills are full kills. */
1980 function_abi callee_abi
= insn_callee_abi (insn
);
1981 IOR_REG_SET_HRS (this_altered
,
1982 callee_abi
.full_and_partial_reg_clobbers ());
1985 if (suitable_set_for_replacement (insn
, &dest
, &src
))
1987 rtx_expr_list
**pnote
, **pnote_next
;
1989 replace_in_expr (expr
, dest
, src
);
1990 if (CONSTANT_P (*expr
))
1993 for (pnote
= &cond_list
; *pnote
; pnote
= pnote_next
)
1995 rtx_expr_list
*note
= *pnote
;
1996 rtx old_cond
= XEXP (note
, 0);
1998 pnote_next
= (rtx_expr_list
**)&XEXP (note
, 1);
1999 replace_in_expr (&XEXP (note
, 0), dest
, src
);
		/* We can no longer use a condition that has been simplified
		   to a constant, and simplify_using_condition will abort if
		   we try.  */
2004 if (CONSTANT_P (XEXP (note
, 0)))
2006 *pnote
= *pnote_next
;
2008 free_EXPR_LIST_node (note
);
2010 /* Retry simplifications with this condition if either the
2011 expression or the condition changed. */
2012 else if (old_cond
!= XEXP (note
, 0) || old
!= *expr
)
2013 simplify_using_condition (XEXP (note
, 0), expr
, altered
);
2018 rtx_expr_list
**pnote
, **pnote_next
;
2020 /* If we did not use this insn to make a replacement, any overlap
2021 between stores in this insn and our expression will cause the
2022 expression to become invalid. */
2023 if (altered_reg_used (*expr
, this_altered
))
2026 /* Likewise for the conditions. */
2027 for (pnote
= &cond_list
; *pnote
; pnote
= pnote_next
)
2029 rtx_expr_list
*note
= *pnote
;
2030 rtx old_cond
= XEXP (note
, 0);
2032 pnote_next
= (rtx_expr_list
**)&XEXP (note
, 1);
2033 if (altered_reg_used (old_cond
, this_altered
))
2035 *pnote
= *pnote_next
;
2037 free_EXPR_LIST_node (note
);
2042 if (CONSTANT_P (*expr
))
2045 IOR_REG_SET (altered
, this_altered
);
2047 /* If the expression now contains regs that have been altered, we
2048 can't return it to the caller. However, it is still valid for
2049 further simplification, so keep searching to see if we can
2050 eventually turn it into a constant. */
2051 if (altered_reg_used (*expr
, altered
))
2052 expression_valid
= false;
2053 if (expression_valid
)
2054 last_valid_expr
= *expr
;
2057 if (!single_pred_p (e
->src
)
2058 || single_pred (e
->src
) == ENTRY_BLOCK_PTR_FOR_FN (cfun
))
2060 e
= single_pred_edge (e
->src
);
2064 free_EXPR_LIST_list (&cond_list
);
2065 if (!CONSTANT_P (*expr
))
2066 *expr
= last_valid_expr
;
2067 FREE_REG_SET (altered
);
2068 FREE_REG_SET (this_altered
);
/* Transforms invariant IV into MODE.  Adds to DESC assumptions based on the
   fact that IV occurs as the left operand of comparison COND and that its
   signedness is SIGNED_P.  */
2076 shorten_into_mode (class rtx_iv
*iv
, scalar_int_mode mode
,
2077 enum rtx_code cond
, bool signed_p
, class niter_desc
*desc
)
2079 rtx mmin
, mmax
, cond_over
, cond_under
;
2081 get_mode_bounds (mode
, signed_p
, iv
->extend_mode
, &mmin
, &mmax
);
2082 cond_under
= simplify_gen_relational (LT
, SImode
, iv
->extend_mode
,
2084 cond_over
= simplify_gen_relational (GT
, SImode
, iv
->extend_mode
,
2093 if (cond_under
!= const0_rtx
)
2095 alloc_EXPR_LIST (0, cond_under
, desc
->infinite
);
2096 if (cond_over
!= const0_rtx
)
2097 desc
->noloop_assumptions
=
2098 alloc_EXPR_LIST (0, cond_over
, desc
->noloop_assumptions
);
2105 if (cond_over
!= const0_rtx
)
2107 alloc_EXPR_LIST (0, cond_over
, desc
->infinite
);
2108 if (cond_under
!= const0_rtx
)
2109 desc
->noloop_assumptions
=
2110 alloc_EXPR_LIST (0, cond_under
, desc
->noloop_assumptions
);
2114 if (cond_over
!= const0_rtx
)
2116 alloc_EXPR_LIST (0, cond_over
, desc
->infinite
);
2117 if (cond_under
!= const0_rtx
)
2119 alloc_EXPR_LIST (0, cond_under
, desc
->infinite
);
2127 iv
->extend
= signed_p
? IV_SIGN_EXTEND
: IV_ZERO_EXTEND
;
2130 /* Transforms IV0 and IV1 compared by COND so that they are both compared as
2131 subregs of the same mode if possible (sometimes it is necessary to add
2132 some assumptions to DESC). */
2135 canonicalize_iv_subregs (class rtx_iv
*iv0
, class rtx_iv
*iv1
,
2136 enum rtx_code cond
, class niter_desc
*desc
)
2138 scalar_int_mode comp_mode
;
2141 /* If the ivs behave specially in the first iteration, or are
2142 added/multiplied after extending, we ignore them. */
2143 if (iv0
->first_special
|| iv0
->mult
!= const1_rtx
|| iv0
->delta
!= const0_rtx
)
2145 if (iv1
->first_special
|| iv1
->mult
!= const1_rtx
|| iv1
->delta
!= const0_rtx
)
2148 /* If there is some extend, it must match signedness of the comparison. */
2153 if (iv0
->extend
== IV_ZERO_EXTEND
2154 || iv1
->extend
== IV_ZERO_EXTEND
)
2161 if (iv0
->extend
== IV_SIGN_EXTEND
2162 || iv1
->extend
== IV_SIGN_EXTEND
)
2168 if (iv0
->extend
!= IV_UNKNOWN_EXTEND
2169 && iv1
->extend
!= IV_UNKNOWN_EXTEND
2170 && iv0
->extend
!= iv1
->extend
)
2174 if (iv0
->extend
!= IV_UNKNOWN_EXTEND
)
2175 signed_p
= iv0
->extend
== IV_SIGN_EXTEND
;
2176 if (iv1
->extend
!= IV_UNKNOWN_EXTEND
)
2177 signed_p
= iv1
->extend
== IV_SIGN_EXTEND
;
  /* Values of both variables should be computed in the same mode.  These
     might indeed be different, if we have a comparison like

     (compare (subreg:SI (iv0)) (subreg:SI (iv1)))

     and iv0 and iv1 are both ivs iterating in SI mode, but calculated
     in different modes.  This does not seem impossible to handle, but
     it hardly ever occurs in practice.

     The only exception is the case when one of the operands is invariant.
     For example pentium 3 generates comparisons like
     (lt (subreg:HI (reg:SI)) 100).  Here we assign HImode to 100, but we
     definitely do not want this to prevent the optimization.  */
2197 comp_mode
= iv0
->extend_mode
;
2198 if (GET_MODE_BITSIZE (comp_mode
) < GET_MODE_BITSIZE (iv1
->extend_mode
))
2199 comp_mode
= iv1
->extend_mode
;
2201 if (iv0
->extend_mode
!= comp_mode
)
2203 if (iv0
->mode
!= iv0
->extend_mode
2204 || iv0
->step
!= const0_rtx
)
2207 iv0
->base
= simplify_gen_unary (signed_p
? SIGN_EXTEND
: ZERO_EXTEND
,
2208 comp_mode
, iv0
->base
, iv0
->mode
);
2209 iv0
->extend_mode
= comp_mode
;
2212 if (iv1
->extend_mode
!= comp_mode
)
2214 if (iv1
->mode
!= iv1
->extend_mode
2215 || iv1
->step
!= const0_rtx
)
2218 iv1
->base
= simplify_gen_unary (signed_p
? SIGN_EXTEND
: ZERO_EXTEND
,
2219 comp_mode
, iv1
->base
, iv1
->mode
);
2220 iv1
->extend_mode
= comp_mode
;
2223 /* Check that both ivs belong to a range of a single mode. If one of the
2224 operands is an invariant, we may need to shorten it into the common
2226 if (iv0
->mode
== iv0
->extend_mode
2227 && iv0
->step
== const0_rtx
2228 && iv0
->mode
!= iv1
->mode
)
2229 shorten_into_mode (iv0
, iv1
->mode
, cond
, signed_p
, desc
);
2231 if (iv1
->mode
== iv1
->extend_mode
2232 && iv1
->step
== const0_rtx
2233 && iv0
->mode
!= iv1
->mode
)
2234 shorten_into_mode (iv1
, iv0
->mode
, swap_condition (cond
), signed_p
, desc
);
2236 if (iv0
->mode
!= iv1
->mode
)
2239 desc
->mode
= iv0
->mode
;
2240 desc
->signed_p
= signed_p
;
/* Tries to estimate the maximum number of iterations in LOOP, and returns the
   result.  This function is called from iv_number_of_iterations with
   a number of fields in DESC already filled in.  OLD_NITER is the original
   expression for the number of iterations, before we tried to simplify it.  */
2251 determine_max_iter (class loop
*loop
, class niter_desc
*desc
, rtx old_niter
)
2253 rtx niter
= desc
->niter_expr
;
2254 rtx mmin
, mmax
, cmp
;
2256 uint64_t andmax
= 0;
2258 /* We used to look for constant operand 0 of AND,
2259 but canonicalization should always make this impossible. */
2260 gcc_checking_assert (GET_CODE (niter
) != AND
2261 || !CONST_INT_P (XEXP (niter
, 0)));
2263 if (GET_CODE (niter
) == AND
2264 && CONST_INT_P (XEXP (niter
, 1)))
2266 andmax
= UINTVAL (XEXP (niter
, 1));
2267 niter
= XEXP (niter
, 0);
2270 get_mode_bounds (desc
->mode
, desc
->signed_p
, desc
->mode
, &mmin
, &mmax
);
2271 nmax
= UINTVAL (mmax
) - UINTVAL (mmin
);
2273 if (GET_CODE (niter
) == UDIV
)
2275 if (!CONST_INT_P (XEXP (niter
, 1)))
2277 inc
= INTVAL (XEXP (niter
, 1));
2278 niter
= XEXP (niter
, 0);
2283 /* We could use a binary search here, but for now improving the upper
2284 bound by just one eliminates one important corner case. */
2285 cmp
= simplify_gen_relational (desc
->signed_p
? LT
: LTU
, VOIDmode
,
2286 desc
->mode
, old_niter
, mmax
);
2287 simplify_using_initial_values (loop
, UNKNOWN
, &cmp
);
2288 if (cmp
== const_true_rtx
)
2293 fprintf (dump_file
, ";; improved upper bound by one.\n");
2297 nmax
= MIN (nmax
, andmax
);
2299 fprintf (dump_file
, ";; Determined upper bound %" PRId64
".\n",
/* Computes the number of iterations of the CONDITION in INSN in LOOP and
   stores the result into DESC.  Very similar to determine_number_of_iterations
   (basically its rtl version), complicated by things like subregs.  */
2309 iv_number_of_iterations (class loop
*loop
, rtx_insn
*insn
, rtx condition
,
2310 class niter_desc
*desc
)
2312 rtx op0
, op1
, delta
, step
, bound
, may_xform
, tmp
, tmp0
, tmp1
;
2313 class rtx_iv iv0
, iv1
;
2314 rtx assumption
, may_not_xform
;
2316 machine_mode nonvoid_mode
;
2317 scalar_int_mode comp_mode
;
2318 rtx mmin
, mmax
, mode_mmin
, mode_mmax
;
2319 uint64_t s
, size
, d
, inv
, max
, up
, down
;
2320 int64_t inc
, step_val
;
2321 int was_sharp
= false;
  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if noloop_assumptions then the loop does not roll
     if infinite then this exit is never used */
2331 desc
->assumptions
= NULL_RTX
;
2332 desc
->noloop_assumptions
= NULL_RTX
;
2333 desc
->infinite
= NULL_RTX
;
2334 desc
->simple_p
= true;
2336 desc
->const_iter
= false;
2337 desc
->niter_expr
= NULL_RTX
;
2339 cond
= GET_CODE (condition
);
2340 gcc_assert (COMPARISON_P (condition
));
2342 nonvoid_mode
= GET_MODE (XEXP (condition
, 0));
2343 if (nonvoid_mode
== VOIDmode
)
2344 nonvoid_mode
= GET_MODE (XEXP (condition
, 1));
2345 /* The constant comparisons should be folded. */
2346 gcc_assert (nonvoid_mode
!= VOIDmode
);
2348 /* We only handle integers or pointers. */
2349 scalar_int_mode mode
;
2350 if (!is_a
<scalar_int_mode
> (nonvoid_mode
, &mode
))
2353 op0
= XEXP (condition
, 0);
2354 if (!iv_analyze (insn
, mode
, op0
, &iv0
))
2357 op1
= XEXP (condition
, 1);
2358 if (!iv_analyze (insn
, mode
, op1
, &iv1
))
2361 if (GET_MODE_BITSIZE (iv0
.extend_mode
) > HOST_BITS_PER_WIDE_INT
2362 || GET_MODE_BITSIZE (iv1
.extend_mode
) > HOST_BITS_PER_WIDE_INT
)
2365 /* Check condition and normalize it. */
2373 std::swap (iv0
, iv1
);
2374 cond
= swap_condition (cond
);
2386 /* Handle extends. This is relatively nontrivial, so we only try in some
2387 easy cases, when we can canonicalize the ivs (possibly by adding some
2388 assumptions) to shape subreg (base + i * step). This function also fills
2389 in desc->mode and desc->signed_p. */
2391 if (!canonicalize_iv_subregs (&iv0
, &iv1
, cond
, desc
))
2394 comp_mode
= iv0
.extend_mode
;
2396 size
= GET_MODE_PRECISION (mode
);
2397 get_mode_bounds (mode
, (cond
== LE
|| cond
== LT
), comp_mode
, &mmin
, &mmax
);
2398 mode_mmin
= lowpart_subreg (mode
, mmin
, comp_mode
);
2399 mode_mmax
= lowpart_subreg (mode
, mmax
, comp_mode
);
2401 if (!CONST_INT_P (iv0
.step
) || !CONST_INT_P (iv1
.step
))
  /* We can take care of the case of two induction variables chasing each other
     if the test is NE.  I have never seen a loop using it, but still it is
     possible.  */
2407 if (iv0
.step
!= const0_rtx
&& iv1
.step
!= const0_rtx
)
2412 iv0
.step
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.step
, iv1
.step
);
2413 iv1
.step
= const0_rtx
;
2416 iv0
.step
= lowpart_subreg (mode
, iv0
.step
, comp_mode
);
2417 iv1
.step
= lowpart_subreg (mode
, iv1
.step
, comp_mode
);
  /* This is either an infinite loop or one that ends immediately, depending
     on the initial values.  Unswitching should remove this kind of condition.  */
2421 if (iv0
.step
== const0_rtx
&& iv1
.step
== const0_rtx
)
2426 if (iv0
.step
== const0_rtx
)
2427 step_val
= -INTVAL (iv1
.step
);
2429 step_val
= INTVAL (iv0
.step
);
2431 /* Ignore loops of while (i-- < 10) type. */
2435 step_is_pow2
= !(step_val
& (step_val
- 1));
2439 /* We do not care about whether the step is power of two in this
2441 step_is_pow2
= false;
  /* Some more condition normalization.  We must record some assumptions
     due to overflows.  */

      /* We want to take care only of non-sharp relationals; this is easy,
	 as in the cases where the overflow would make the transformation
	 unsafe the loop does not roll.  Seemingly it would make more sense
	 to want to take care of sharp relationals instead, as NE is more
	 similar to them, but the problem is that here the transformation
	 would be more difficult due to possibly infinite loops.  */
2457 if (iv0
.step
== const0_rtx
)
2459 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2460 assumption
= simplify_gen_relational (EQ
, SImode
, mode
, tmp
,
2462 if (assumption
== const_true_rtx
)
2463 goto zero_iter_simplify
;
2464 iv0
.base
= simplify_gen_binary (PLUS
, comp_mode
,
2465 iv0
.base
, const1_rtx
);
2469 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2470 assumption
= simplify_gen_relational (EQ
, SImode
, mode
, tmp
,
2472 if (assumption
== const_true_rtx
)
2473 goto zero_iter_simplify
;
2474 iv1
.base
= simplify_gen_binary (PLUS
, comp_mode
,
2475 iv1
.base
, constm1_rtx
);
2478 if (assumption
!= const0_rtx
)
2479 desc
->noloop_assumptions
=
2480 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);
2481 cond
= (cond
== LT
) ? LE
: LEU
;
2483 /* It will be useful to be able to tell the difference once more in
2484 LE -> NE reduction. */
2490 /* Take care of trivially infinite loops. */
2493 if (iv0
.step
== const0_rtx
)
2495 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2496 if (rtx_equal_p (tmp
, mode_mmin
))
2499 alloc_EXPR_LIST (0, const_true_rtx
, NULL_RTX
);
2500 /* Fill in the remaining fields somehow. */
2501 goto zero_iter_simplify
;
2506 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2507 if (rtx_equal_p (tmp
, mode_mmax
))
2510 alloc_EXPR_LIST (0, const_true_rtx
, NULL_RTX
);
2511 /* Fill in the remaining fields somehow. */
2512 goto zero_iter_simplify
;
  /* If we can, we want to take care of NE conditions instead of size
     comparisons, as they are much more friendly (most importantly
     this takes care of the special handling of loops with step 1).  We can
     do it if we first check that the upper bound is greater than or equal to
     the lower bound, that their difference is a constant c modulo the step,
     and that there is no overflow.  */
2525 if (iv0
.step
== const0_rtx
)
2526 step
= simplify_gen_unary (NEG
, comp_mode
, iv1
.step
, comp_mode
);
2529 step
= lowpart_subreg (mode
, step
, comp_mode
);
2530 delta
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, iv0
.base
);
2531 delta
= lowpart_subreg (mode
, delta
, comp_mode
);
2532 delta
= simplify_gen_binary (UMOD
, mode
, delta
, step
);
2533 may_xform
= const0_rtx
;
2534 may_not_xform
= const_true_rtx
;
2536 if (CONST_INT_P (delta
))
2538 if (was_sharp
&& INTVAL (delta
) == INTVAL (step
) - 1)
	    /* A special case.  We have transformed a condition of type
	       for (i = 0; i < 4; i += 4)
	       into
	       for (i = 0; i <= 3; i += 4)
	       obviously if the test for overflow during that transformation
	       passed, we cannot overflow here.  Most importantly any
	       loop with sharp end condition and step 1 falls into this
	       category, so handling this case specially is definitely
	       worth the trouble.  */
2549 may_xform
= const_true_rtx
;
2551 else if (iv0
.step
== const0_rtx
)
2553 bound
= simplify_gen_binary (PLUS
, comp_mode
, mmin
, step
);
2554 bound
= simplify_gen_binary (MINUS
, comp_mode
, bound
, delta
);
2555 bound
= lowpart_subreg (mode
, bound
, comp_mode
);
2556 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2557 may_xform
= simplify_gen_relational (cond
, SImode
, mode
,
2559 may_not_xform
= simplify_gen_relational (reverse_condition (cond
),
2565 bound
= simplify_gen_binary (MINUS
, comp_mode
, mmax
, step
);
2566 bound
= simplify_gen_binary (PLUS
, comp_mode
, bound
, delta
);
2567 bound
= lowpart_subreg (mode
, bound
, comp_mode
);
2568 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2569 may_xform
= simplify_gen_relational (cond
, SImode
, mode
,
2571 may_not_xform
= simplify_gen_relational (reverse_condition (cond
),
2577 if (may_xform
!= const0_rtx
)
2579 /* We perform the transformation always provided that it is not
2580 completely senseless. This is OK, as we would need this assumption
2581 to determine the number of iterations anyway. */
2582 if (may_xform
!= const_true_rtx
)
2584 /* If the step is a power of two and the final value we have
2585 computed overflows, the cycle is infinite. Otherwise it
2586 is nontrivial to compute the number of iterations. */
2588 desc
->infinite
= alloc_EXPR_LIST (0, may_not_xform
,
2591 desc
->assumptions
= alloc_EXPR_LIST (0, may_xform
,
2595 /* We are going to lose some information about upper bound on
2596 number of iterations in this step, so record the information
2598 inc
= INTVAL (iv0
.step
) - INTVAL (iv1
.step
);
2599 if (CONST_INT_P (iv1
.base
))
2600 up
= INTVAL (iv1
.base
);
2602 up
= INTVAL (mode_mmax
) - inc
;
2603 down
= INTVAL (CONST_INT_P (iv0
.base
)
2606 max
= (up
- down
) / inc
+ 1;
2608 && !desc
->assumptions
)
2609 record_niter_bound (loop
, max
, false, true);
2611 if (iv0
.step
== const0_rtx
)
2613 iv0
.base
= simplify_gen_binary (PLUS
, comp_mode
, iv0
.base
, delta
);
2614 iv0
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.base
, step
);
2618 iv1
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, delta
);
2619 iv1
.base
= simplify_gen_binary (PLUS
, comp_mode
, iv1
.base
, step
);
2622 tmp0
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2623 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2624 assumption
= simplify_gen_relational (reverse_condition (cond
),
2625 SImode
, mode
, tmp0
, tmp1
);
2626 if (assumption
== const_true_rtx
)
2627 goto zero_iter_simplify
;
2628 else if (assumption
!= const0_rtx
)
2629 desc
->noloop_assumptions
=
2630 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);

  /* Count the number of iterations.  */
  if (cond == NE)
    {
      /* Everything we do here is just arithmetic modulo the size of the mode.
         This lets us do more involved computations of the number of
         iterations than in other cases.  First transform the condition into
         the shape s * i <> c, with s positive.  */
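      /* Illustrative example (hypothetical values, added for clarity): for a
         condition such as 2 + 3 * i != 10 + 1 * i this computes
         iv1.base = 10 - 2 = 8 and iv0.step = 3 - 1 = 2, i.e. the canonical
         shape 2 * i != 8; had the combined step come out negative, both
         sides would be negated to keep s positive.  */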
      iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base);
      iv0.base = const0_rtx;
      iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step);
      iv1.step = const0_rtx;
      if (INTVAL (iv0.step) < 0)
        {
          iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode);
          iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode);
        }
      iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);

      /* Let nsd (s, size of mode) = d, i.e. d is the greatest common divisor
         of s and the size of the mode.  If d does not divide c, the loop is
         infinite.  Otherwise, the number of iterations is
         (inverse(s/d) * (c/d)) mod (size of mode/d).  */
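      /* Worked example (hypothetical values, added for clarity): in an 8-bit
         mode with s = 6 and c = 9, d = nsd (6, 256) = 2; since 2 does not
         divide 9, equality is never reached and the loop is infinite.  With
         c = 10 instead, s/d = 3, c/d = 5 and size of mode/d = 128;
         inverse (3, 7) = 43 modulo 128, so the number of iterations is
         (43 * 5) mod 128 = 87, and indeed 6 * 87 = 522 == 10 (mod 256).  */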
      s = INTVAL (iv0.step); d = 1;
      while (s % 2 != 1)
        {
          s /= 2;
          d *= 2;
          size--;
        }
      bound = GEN_INT (((uint64_t) 1 << (size - 1) << 1) - 1);

      tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
      tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
      assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
      desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);

      tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
      inv = inverse (s, size);
      tmp = simplify_gen_binary (MULT, mode, tmp, gen_int_mode (inv, mode));
      desc->niter_expr = simplify_gen_binary (AND, mode, tmp, bound);
    }
  else
    {
      if (iv1.step == const0_rtx)
        /* Condition in the shape a + s * i <= b.
           We must know that b + s does not overflow and a <= b + s, and then
           we can compute the number of iterations as (b + s - a) / s.  (It
           might seem that we could in fact be more clever about testing the
           b + s overflow condition using some information about b - a mod s,
           but that was already taken into account during the LE -> NE
           transform.)  */
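        /* Worked example (hypothetical values, added for clarity): with
           a = 3, b = 10 and s = 4 the condition 3 + 4 * i <= 10 holds for
           i = 0 and i = 1, i.e. (b + s - a) / s = (10 + 4 - 3) / 4 = 2
           iterations; the computation is valid here because b + s = 14 does
           not overflow and a <= b + s.  */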
        {
          tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
          tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);

          bound = simplify_gen_binary (MINUS, mode, mode_mmax,
                                       lowpart_subreg (mode, step,
                                                       comp_mode));

          if (step_is_pow2)
            {
              rtx t0, t1;

              /* If s is a power of 2, we know that the loop is infinite if
                 a % s <= b % s and b + s overflows.  */
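              /* Illustration (hypothetical values, added for clarity): in an
                 unsigned 8-bit comparison with s = 64, a = 2 and b = 250,
                 b + s overflows and a % s = 2 <= b % s = 58; the values
                 a + s * i cycle through 2, 66, 130, 194, 2, ... and never
                 exceed b, so the exit is never taken.  */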
              assumption = simplify_gen_relational (reverse_condition (cond),
                                                    SImode, mode,
                                                    tmp1, bound);

              t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
              t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
              tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
              assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
              desc->infinite
                = alloc_EXPR_LIST (0, assumption, desc->infinite);
            }
          else
            {
              assumption = simplify_gen_relational (cond, SImode, mode,
                                                    tmp1, bound);
              desc->assumptions
                = alloc_EXPR_LIST (0, assumption, desc->assumptions);
            }

          tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step);
          tmp = lowpart_subreg (mode, tmp, comp_mode);
          assumption = simplify_gen_relational (reverse_condition (cond),
                                                SImode, mode, tmp0, tmp);

          delta = simplify_gen_binary (PLUS, mode, tmp1, step);
          delta = simplify_gen_binary (MINUS, mode, delta, tmp0);
        }
      else
        {
          /* Condition in the shape a <= b - s * i.
             We must know that a - s does not overflow and a - s <= b, and
             then we can again compute the number of iterations as
             (b - (a - s)) / s.  */
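          /* Worked example (hypothetical values, added for clarity): with
             a = 3, b = 10 and s = 4 the condition 3 <= 10 - 4 * i holds for
             i = 0 and i = 1, i.e. (b - (a - s)) / s = (10 - (3 - 4)) / 4 = 2
             iterations; the computation is valid here because a - s = -1
             does not overflow and a - s <= b.  */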
          step = simplify_gen_unary (NEG, mode, iv1.step, mode);
          tmp0 = lowpart_subreg (mode, iv0.base, comp_mode);
          tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);

          bound = simplify_gen_binary (PLUS, mode, mode_mmin,
                                       lowpart_subreg (mode, step, comp_mode));

          if (step_is_pow2)
            {
              rtx t0, t1;

              /* If s is a power of 2, we know that the loop is infinite if
                 a % s <= b % s and a - s overflows.  */
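              /* Illustration (hypothetical values, added for clarity): in an
                 unsigned 8-bit comparison with s = 64, a = 2 and b = 250,
                 a - s wraps around and a % s = 2 <= b % s = 58; the values
                 b - s * i cycle through 250, 186, 122, 58, 250, ... and never
                 drop below a, so the exit is never taken.  */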
              assumption = simplify_gen_relational (reverse_condition (cond),
                                                    SImode, mode,
                                                    bound, tmp0);

              t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
              t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
              tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
              assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
              desc->infinite
                = alloc_EXPR_LIST (0, assumption, desc->infinite);
            }
          else
            {
              assumption = simplify_gen_relational (cond, SImode, mode,
                                                    bound, tmp0);
              desc->assumptions
                = alloc_EXPR_LIST (0, assumption, desc->assumptions);
            }

          tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step);
          tmp = lowpart_subreg (mode, tmp, comp_mode);
          assumption = simplify_gen_relational (reverse_condition (cond),
                                                SImode, mode,
                                                tmp, tmp1);

          delta = simplify_gen_binary (MINUS, mode, tmp0, step);
          delta = simplify_gen_binary (MINUS, mode, tmp1, delta);
        }

      if (assumption == const_true_rtx)
        goto zero_iter_simplify;
      else if (assumption != const0_rtx)
        desc->noloop_assumptions
          = alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions);
      delta = simplify_gen_binary (UDIV, mode, delta, step);
      desc->niter_expr = delta;
    }

  old_niter = desc->niter_expr;

  simplify_using_initial_values (loop, AND, &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions);
  simplify_using_initial_values (loop, IOR, &desc->infinite);
  simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr);

  /* Rerun the simplification.  Consider code created by copying loop headers,
     e.g.

       i = 0;

       if (0 < n)
         {
           do
             {
               i++;
             }
           while (i < n);
         }

     The first pass determines that i = 0, the second pass uses it to
     eliminate the noloop assumption.  */

  simplify_using_initial_values (loop, AND, &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions);
  simplify_using_initial_values (loop, IOR, &desc->infinite);
  simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr);

  if (desc->noloop_assumptions
      && XEXP (desc->noloop_assumptions, 0) == const_true_rtx)
    goto zero_iter;

  if (CONST_INT_P (desc->niter_expr))
    {
      uint64_t val = INTVAL (desc->niter_expr);

      desc->const_iter = true;
      desc->niter = val & GET_MODE_MASK (desc->mode);
      if (!desc->infinite
          && !desc->assumptions)
        record_niter_bound (loop, desc->niter, false, true);
    }
  else
    {
      max = determine_max_iter (loop, desc, old_niter);
      if (!max)
        goto zero_iter_simplify;
      if (!desc->infinite
          && !desc->assumptions)
        record_niter_bound (loop, max, false, true);

      /* simplify_using_initial_values does a copy propagation on the
         registers in the expression for the number of iterations.  This
         prolongs the life ranges of registers and increases register
         pressure, and usually brings no gain (and if it happens to, the cse
         pass will take care of it anyway).  So prevent this behavior, unless
         it enabled us to derive that the number of iterations is a
         constant.  */
      desc->niter_expr = old_niter;
    }

  return;

zero_iter_simplify:
  /* Simplify the assumptions.  */
  simplify_using_initial_values (loop, AND, &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, IOR, &desc->infinite);

  /* Fallthru.  */
zero_iter:
  desc->const_iter = true;
  desc->niter = 0;
  record_niter_bound (loop, 0, true, true);
  desc->noloop_assumptions = NULL_RTX;
  desc->niter_expr = const0_rtx;
  return;

fail:
  desc->simple_p = false;
  return;
}

/* Checks whether E is a simple exit from LOOP and stores its description
   into DESC.  */

static void
check_simple_exit (class loop *loop, edge e, class niter_desc *desc)
{
  basic_block exit_bb;
  rtx condition;
  rtx_insn *at;
  edge ein;

  exit_bb = e->src;
  desc->simple_p = false;

  /* It must belong directly to the loop.  */
  if (exit_bb->loop_father != loop)
    return;

  /* It must be tested (at least) once during any iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit_bb))
    return;

  /* It must end in a simple conditional jump.  */
  if (!any_condjump_p (BB_END (exit_bb)))
    return;

  ein = EDGE_SUCC (exit_bb, 0);
  if (ein == e)
    ein = EDGE_SUCC (exit_bb, 1);

  desc->out_edge = e;
  desc->in_edge = ein;

  /* Test whether the condition is suitable.  */
  if (!(condition = get_condition (BB_END (ein->src), &at, false, false)))
    return;

  if (ein->flags & EDGE_FALLTHRU)
    {
      condition = reversed_condition (condition);
      if (!condition)
        return;
    }

  /* Check that we are able to determine the number of iterations and fill
     in the information about it.  */
  iv_number_of_iterations (loop, at, condition, desc);
}

/* Finds a simple exit of LOOP and stores its description into DESC.  */

void
find_simple_exit (class loop *loop, class niter_desc *desc)
{
  unsigned i;
  basic_block *body;
  edge e;
  class niter_desc act;
  bool any = false;
  edge_iterator ei;

  desc->simple_p = false;
  body = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      FOR_EACH_EDGE (e, ei, body[i]->succs)
        {
          if (flow_bb_inside_loop_p (loop, e->dest))
            continue;

          check_simple_exit (loop, e, &act);
          if (!act.simple_p)
            continue;

          if (!any)
            any = true;
          else
            {
              /* Prefer constant iterations; the fewer, the better.  */
              if (!act.const_iter
                  || (desc->const_iter && act.niter >= desc->niter))
                continue;

              /* Also if the actual exit may be infinite, while the old one
                 is not, prefer the old one.  */
              if (act.infinite && !desc->infinite)
                continue;
            }

          *desc = act;
        }
    }

  if (dump_file)
    {
      if (desc->simple_p)
        {
          fprintf (dump_file, "Loop %d is simple:\n", loop->num);
          fprintf (dump_file, "  simple exit %d -> %d\n",
                   desc->out_edge->src->index,
                   desc->out_edge->dest->index);
          if (desc->assumptions)
            {
              fprintf (dump_file, "  assumptions: ");
              print_rtl (dump_file, desc->assumptions);
              fprintf (dump_file, "\n");
            }
          if (desc->noloop_assumptions)
            {
              fprintf (dump_file, "  does not roll if: ");
              print_rtl (dump_file, desc->noloop_assumptions);
              fprintf (dump_file, "\n");
            }
          if (desc->infinite)
            {
              fprintf (dump_file, "  infinite if: ");
              print_rtl (dump_file, desc->infinite);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "  number of iterations: ");
          print_rtl (dump_file, desc->niter_expr);
          fprintf (dump_file, "\n");

          fprintf (dump_file, "  upper bound: %li\n",
                   (long) get_max_loop_iterations_int (loop));
          fprintf (dump_file, "  likely upper bound: %li\n",
                   (long) get_likely_max_loop_iterations_int (loop));
          fprintf (dump_file, "  realistic bound: %li\n",
                   (long) get_estimated_loop_iterations_int (loop));
        }
      else
        fprintf (dump_file, "Loop %d is not simple.\n", loop->num);
    }

  /* Fix up the finiteness if possible.  We can only do it for a single exit:
     the loop is finite, but it is possible that we predicate one loop exit to
     be finite which cannot be determined as finite in the middle-end as well,
     which results in incorrect predicate information on the exit condition
     expression.  For example, if it says [(int) _1 + -8, +, -8] != 0 is
     finite, it means that -8 exactly divides _1.  */
  if (desc->infinite && single_exit (loop) && finite_loop_p (loop))
    {
      desc->infinite = NULL_RTX;
      if (dump_file)
        fprintf (dump_file, "  infinite updated to finite.\n");
    }

  free (body);
}

/* Creates a simple loop description of LOOP if it was not computed
   already.  */

class niter_desc *
get_simple_loop_desc (class loop *loop)
{
  class niter_desc *desc = simple_loop_desc (loop);

  if (desc)
    return desc;

  /* At least desc->infinite is not always initialized by
     find_simple_exit.  */
  desc = ggc_cleared_alloc<niter_desc> ();
  iv_analysis_loop_init (loop);
  find_simple_exit (loop, desc);
  loop->simple_loop_desc = desc;

  return desc;
}
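
/* Usage sketch (illustrative only; actual callers such as the RTL loop
   unroller follow a similar pattern):

     class niter_desc *desc = get_simple_loop_desc (loop);
     if (desc->simple_p && desc->const_iter)
       ...use desc->niter, the exact iteration count...

   The description is cached in loop->simple_loop_desc and is released by
   free_simple_loop_desc.  */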

/* Releases the simple loop description for LOOP.  */

void
free_simple_loop_desc (class loop *loop)
{
  class niter_desc *desc = simple_loop_desc (loop);

  if (!desc)
    return;

  ggc_free (desc);
  loop->simple_loop_desc = NULL;
}