1 /* Rtl-level induction variable analysis.
2 Copyright (C) 2004-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This is a simple analysis of induction variables of the loop. The major use
21 is for determining the number of iterations of a loop for loop unrolling,
22 doloop optimization and branch prediction. The iv information is computed
25 Induction variables are analyzed by walking the use-def chains. When
26 a basic induction variable (biv) is found, it is cached in the bivs
27 hash table. When register is proved to be a biv, its description
28 is stored to DF_REF_DATA of the def reference.
30 The analysis works always with one loop -- you must call
31 iv_analysis_loop_init (loop) for it. All the other functions then work with
32 this loop. When you need to work with another loop, just call
33 iv_analysis_loop_init for it. When you no longer need iv analysis, call
34 iv_analysis_done () to clean up the memory.
36 The available functions are:
38 iv_analyze (insn, mode, reg, iv): Stores the description of the induction
39 variable corresponding to the use of register REG in INSN to IV, given
40 that REG has mode MODE. Returns true if REG is an induction variable
41 in INSN. false otherwise. If a use of REG is not found in INSN,
42 the following insns are scanned (so that we may call this function
43 on insns returned by get_condition).
44 iv_analyze_result (insn, def, iv): Stores to IV the description of the iv
45 corresponding to DEF, which is a register defined in INSN.
46 iv_analyze_expr (insn, mode, expr, iv): Stores to IV the description of iv
47 corresponding to expression EXPR evaluated at INSN. All registers used by
48 EXPR must also be used in INSN. MODE is the mode of EXPR.
53 #include "coretypes.h"
59 #include "diagnostic-core.h"
64 #include "tree-ssa-loop-niter.h"
66 #include "function-abi.h"
68 /* Possible return values of iv_get_reaching_def. */
72 /* More than one reaching def, or reaching def that does not
76 /* The use is trivial invariant of the loop, i.e. is not changed
80 /* The use is reached by initial value and a value from the
81 previous iteration. */
84 /* The use has single dominating def. */
88 /* Information about a biv. */
93 unsigned regno
; /* The register of the biv. */
94 class rtx_iv iv
; /* Value of the biv. */
97 static bool clean_slate
= true;
99 static unsigned int iv_ref_table_size
= 0;
101 /* Table of rtx_ivs indexed by the df_ref uid field. */
102 static class rtx_iv
** iv_ref_table
;
104 /* Induction variable stored at the reference. */
105 #define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
106 #define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)
108 /* The current loop. */
110 static class loop
*current_loop
;
112 /* Hashtable helper. */
114 struct biv_entry_hasher
: free_ptr_hash
<biv_entry
>
116 typedef rtx_def
*compare_type
;
117 static inline hashval_t
hash (const biv_entry
*);
118 static inline bool equal (const biv_entry
*, const rtx_def
*);
121 /* Returns hash value for biv B. */
124 biv_entry_hasher::hash (const biv_entry
*b
)
129 /* Compares biv B and register R. */
132 biv_entry_hasher::equal (const biv_entry
*b
, const rtx_def
*r
)
134 return b
->regno
== REGNO (r
);
137 /* Bivs of the current loop. */
139 static hash_table
<biv_entry_hasher
> *bivs
;
141 static bool iv_analyze_op (rtx_insn
*, scalar_int_mode
, rtx
, class rtx_iv
*);
143 /* Return the RTX code corresponding to the IV extend code EXTEND. */
144 static inline enum rtx_code
145 iv_extend_to_rtx_code (enum iv_extend_code extend
)
153 case IV_UNKNOWN_EXTEND
:
159 /* Dumps information about IV to FILE. */
161 extern void dump_iv_info (FILE *, class rtx_iv
*);
163 dump_iv_info (FILE *file
, class rtx_iv
*iv
)
167 fprintf (file
, "not simple");
171 if (iv
->step
== const0_rtx
172 && !iv
->first_special
)
173 fprintf (file
, "invariant ");
175 print_rtl (file
, iv
->base
);
176 if (iv
->step
!= const0_rtx
)
178 fprintf (file
, " + ");
179 print_rtl (file
, iv
->step
);
180 fprintf (file
, " * iteration");
182 fprintf (file
, " (in %s)", GET_MODE_NAME (iv
->mode
));
184 if (iv
->mode
!= iv
->extend_mode
)
185 fprintf (file
, " %s to %s",
186 rtx_name
[iv_extend_to_rtx_code (iv
->extend
)],
187 GET_MODE_NAME (iv
->extend_mode
));
189 if (iv
->mult
!= const1_rtx
)
191 fprintf (file
, " * ");
192 print_rtl (file
, iv
->mult
);
194 if (iv
->delta
!= const0_rtx
)
196 fprintf (file
, " + ");
197 print_rtl (file
, iv
->delta
);
199 if (iv
->first_special
)
200 fprintf (file
, " (first special)");
204 check_iv_ref_table_size (void)
206 if (iv_ref_table_size
< DF_DEFS_TABLE_SIZE ())
208 unsigned int new_size
= DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
209 iv_ref_table
= XRESIZEVEC (class rtx_iv
*, iv_ref_table
, new_size
);
210 memset (&iv_ref_table
[iv_ref_table_size
], 0,
211 (new_size
- iv_ref_table_size
) * sizeof (class rtx_iv
*));
212 iv_ref_table_size
= new_size
;
217 /* Checks whether REG is a well-behaved register. */
220 simple_reg_p (rtx reg
)
224 if (GET_CODE (reg
) == SUBREG
)
226 if (!subreg_lowpart_p (reg
))
228 reg
= SUBREG_REG (reg
);
235 if (HARD_REGISTER_NUM_P (r
))
238 if (GET_MODE_CLASS (GET_MODE (reg
)) != MODE_INT
)
244 /* Clears the information about ivs stored in df. */
249 unsigned i
, n_defs
= DF_DEFS_TABLE_SIZE ();
252 check_iv_ref_table_size ();
253 for (i
= 0; i
< n_defs
; i
++)
255 iv
= iv_ref_table
[i
];
259 iv_ref_table
[i
] = NULL
;
267 /* Prepare the data for an induction variable analysis of a LOOP. */
270 iv_analysis_loop_init (class loop
*loop
)
274 /* Clear the information from the analysis of the previous loop. */
277 df_set_flags (DF_EQ_NOTES
+ DF_DEFER_INSN_RESCAN
);
278 bivs
= new hash_table
<biv_entry_hasher
> (10);
284 /* Get rid of the ud chains before processing the rescans. Then add
286 df_remove_problem (df_chain
);
287 df_process_deferred_rescans ();
288 df_set_flags (DF_RD_PRUNE_DEAD_DEFS
);
289 df_chain_add_problem (DF_UD_CHAIN
);
290 df_note_add_problem ();
291 df_analyze_loop (loop
);
293 df_dump_region (dump_file
);
295 check_iv_ref_table_size ();
298 /* Finds the definition of REG that dominates loop latch and stores
299 it to DEF. Returns false if there is not a single definition
300 dominating the latch. If REG has no definition in loop, DEF
301 is set to NULL and true is returned. */
304 latch_dominating_def (rtx reg
, df_ref
*def
)
306 df_ref single_rd
= NULL
, adef
;
307 unsigned regno
= REGNO (reg
);
308 class df_rd_bb_info
*bb_info
= DF_RD_BB_INFO (current_loop
->latch
);
310 for (adef
= DF_REG_DEF_CHAIN (regno
); adef
; adef
= DF_REF_NEXT_REG (adef
))
312 if (!bitmap_bit_p (df
->blocks_to_analyze
, DF_REF_BBNO (adef
))
313 || !bitmap_bit_p (&bb_info
->out
, DF_REF_ID (adef
)))
316 /* More than one reaching definition. */
320 if (!just_once_each_iteration_p (current_loop
, DF_REF_BB (adef
)))
330 /* Gets definition of REG reaching its use in INSN and stores it to DEF. */
332 static enum iv_grd_result
333 iv_get_reaching_def (rtx_insn
*insn
, rtx reg
, df_ref
*def
)
336 basic_block def_bb
, use_bb
;
341 if (!simple_reg_p (reg
))
343 if (GET_CODE (reg
) == SUBREG
)
344 reg
= SUBREG_REG (reg
);
345 gcc_assert (REG_P (reg
));
347 use
= df_find_use (insn
, reg
);
348 gcc_assert (use
!= NULL
);
350 if (!DF_REF_CHAIN (use
))
351 return GRD_INVARIANT
;
353 /* More than one reaching def. */
354 if (DF_REF_CHAIN (use
)->next
)
357 adef
= DF_REF_CHAIN (use
)->ref
;
359 /* We do not handle setting only part of the register. */
360 if (DF_REF_FLAGS (adef
) & DF_REF_READ_WRITE
)
363 def_insn
= DF_REF_INSN (adef
);
364 def_bb
= DF_REF_BB (adef
);
365 use_bb
= BLOCK_FOR_INSN (insn
);
367 if (use_bb
== def_bb
)
368 dom_p
= (DF_INSN_LUID (def_insn
) < DF_INSN_LUID (insn
));
370 dom_p
= dominated_by_p (CDI_DOMINATORS
, use_bb
, def_bb
);
375 return GRD_SINGLE_DOM
;
378 /* The definition does not dominate the use. This is still OK if
379 this may be a use of a biv, i.e. if the def_bb dominates loop
381 if (just_once_each_iteration_p (current_loop
, def_bb
))
382 return GRD_MAYBE_BIV
;
387 /* Sets IV to invariant CST in MODE. Always returns true (just for
388 consistency with other iv manipulation functions that may fail). */
391 iv_constant (class rtx_iv
*iv
, scalar_int_mode mode
, rtx cst
)
395 iv
->step
= const0_rtx
;
396 iv
->first_special
= false;
397 iv
->extend
= IV_UNKNOWN_EXTEND
;
398 iv
->extend_mode
= iv
->mode
;
399 iv
->delta
= const0_rtx
;
400 iv
->mult
= const1_rtx
;
405 /* Evaluates application of subreg to MODE on IV. */
408 iv_subreg (class rtx_iv
*iv
, scalar_int_mode mode
)
410 /* If iv is invariant, just calculate the new value. */
411 if (iv
->step
== const0_rtx
412 && !iv
->first_special
)
414 rtx val
= get_iv_value (iv
, const0_rtx
);
415 val
= lowpart_subreg (mode
, val
,
416 iv
->extend
== IV_UNKNOWN_EXTEND
417 ? iv
->mode
: iv
->extend_mode
);
420 iv
->extend
= IV_UNKNOWN_EXTEND
;
421 iv
->mode
= iv
->extend_mode
= mode
;
422 iv
->delta
= const0_rtx
;
423 iv
->mult
= const1_rtx
;
427 if (iv
->extend_mode
== mode
)
430 if (GET_MODE_BITSIZE (mode
) > GET_MODE_BITSIZE (iv
->mode
))
433 iv
->extend
= IV_UNKNOWN_EXTEND
;
436 iv
->base
= simplify_gen_binary (PLUS
, iv
->extend_mode
, iv
->delta
,
437 simplify_gen_binary (MULT
, iv
->extend_mode
,
438 iv
->base
, iv
->mult
));
439 iv
->step
= simplify_gen_binary (MULT
, iv
->extend_mode
, iv
->step
, iv
->mult
);
440 iv
->mult
= const1_rtx
;
441 iv
->delta
= const0_rtx
;
442 iv
->first_special
= false;
447 /* Evaluates application of EXTEND to MODE on IV. */
450 iv_extend (class rtx_iv
*iv
, enum iv_extend_code extend
, scalar_int_mode mode
)
452 /* If iv is invariant, just calculate the new value. */
453 if (iv
->step
== const0_rtx
454 && !iv
->first_special
)
456 rtx val
= get_iv_value (iv
, const0_rtx
);
457 if (iv
->extend_mode
!= iv
->mode
458 && iv
->extend
!= IV_UNKNOWN_EXTEND
459 && iv
->extend
!= extend
)
460 val
= lowpart_subreg (iv
->mode
, val
, iv
->extend_mode
);
461 val
= simplify_gen_unary (iv_extend_to_rtx_code (extend
), mode
,
464 ? iv
->extend_mode
: iv
->mode
);
466 iv
->extend
= IV_UNKNOWN_EXTEND
;
467 iv
->mode
= iv
->extend_mode
= mode
;
468 iv
->delta
= const0_rtx
;
469 iv
->mult
= const1_rtx
;
473 if (mode
!= iv
->extend_mode
)
476 if (iv
->extend
!= IV_UNKNOWN_EXTEND
477 && iv
->extend
!= extend
)
485 /* Evaluates negation of IV. */
488 iv_neg (class rtx_iv
*iv
)
490 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
492 iv
->base
= simplify_gen_unary (NEG
, iv
->extend_mode
,
493 iv
->base
, iv
->extend_mode
);
494 iv
->step
= simplify_gen_unary (NEG
, iv
->extend_mode
,
495 iv
->step
, iv
->extend_mode
);
499 iv
->delta
= simplify_gen_unary (NEG
, iv
->extend_mode
,
500 iv
->delta
, iv
->extend_mode
);
501 iv
->mult
= simplify_gen_unary (NEG
, iv
->extend_mode
,
502 iv
->mult
, iv
->extend_mode
);
508 /* Evaluates addition or subtraction (according to OP) of IV1 to IV0. */
511 iv_add (class rtx_iv
*iv0
, class rtx_iv
*iv1
, enum rtx_code op
)
513 scalar_int_mode mode
;
516 /* Extend the constant to extend_mode of the other operand if necessary. */
517 if (iv0
->extend
== IV_UNKNOWN_EXTEND
518 && iv0
->mode
== iv0
->extend_mode
519 && iv0
->step
== const0_rtx
520 && GET_MODE_SIZE (iv0
->extend_mode
) < GET_MODE_SIZE (iv1
->extend_mode
))
522 iv0
->extend_mode
= iv1
->extend_mode
;
523 iv0
->base
= simplify_gen_unary (ZERO_EXTEND
, iv0
->extend_mode
,
524 iv0
->base
, iv0
->mode
);
526 if (iv1
->extend
== IV_UNKNOWN_EXTEND
527 && iv1
->mode
== iv1
->extend_mode
528 && iv1
->step
== const0_rtx
529 && GET_MODE_SIZE (iv1
->extend_mode
) < GET_MODE_SIZE (iv0
->extend_mode
))
531 iv1
->extend_mode
= iv0
->extend_mode
;
532 iv1
->base
= simplify_gen_unary (ZERO_EXTEND
, iv1
->extend_mode
,
533 iv1
->base
, iv1
->mode
);
536 mode
= iv0
->extend_mode
;
537 if (mode
!= iv1
->extend_mode
)
540 if (iv0
->extend
== IV_UNKNOWN_EXTEND
541 && iv1
->extend
== IV_UNKNOWN_EXTEND
)
543 if (iv0
->mode
!= iv1
->mode
)
546 iv0
->base
= simplify_gen_binary (op
, mode
, iv0
->base
, iv1
->base
);
547 iv0
->step
= simplify_gen_binary (op
, mode
, iv0
->step
, iv1
->step
);
552 /* Handle addition of constant. */
553 if (iv1
->extend
== IV_UNKNOWN_EXTEND
555 && iv1
->step
== const0_rtx
)
557 iv0
->delta
= simplify_gen_binary (op
, mode
, iv0
->delta
, iv1
->base
);
561 if (iv0
->extend
== IV_UNKNOWN_EXTEND
563 && iv0
->step
== const0_rtx
)
571 iv0
->delta
= simplify_gen_binary (PLUS
, mode
, iv0
->delta
, arg
);
578 /* Evaluates multiplication of IV by constant CST. */
581 iv_mult (class rtx_iv
*iv
, rtx mby
)
583 scalar_int_mode mode
= iv
->extend_mode
;
585 if (GET_MODE (mby
) != VOIDmode
586 && GET_MODE (mby
) != mode
)
589 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
591 iv
->base
= simplify_gen_binary (MULT
, mode
, iv
->base
, mby
);
592 iv
->step
= simplify_gen_binary (MULT
, mode
, iv
->step
, mby
);
596 iv
->delta
= simplify_gen_binary (MULT
, mode
, iv
->delta
, mby
);
597 iv
->mult
= simplify_gen_binary (MULT
, mode
, iv
->mult
, mby
);
603 /* Evaluates shift of IV by constant CST. */
606 iv_shift (class rtx_iv
*iv
, rtx mby
)
608 scalar_int_mode mode
= iv
->extend_mode
;
610 if (GET_MODE (mby
) != VOIDmode
611 && GET_MODE (mby
) != mode
)
614 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
616 iv
->base
= simplify_gen_binary (ASHIFT
, mode
, iv
->base
, mby
);
617 iv
->step
= simplify_gen_binary (ASHIFT
, mode
, iv
->step
, mby
);
621 iv
->delta
= simplify_gen_binary (ASHIFT
, mode
, iv
->delta
, mby
);
622 iv
->mult
= simplify_gen_binary (ASHIFT
, mode
, iv
->mult
, mby
);
628 /* The recursive part of get_biv_step. Gets the value of the single value
629 defined by DEF wrto initial value of REG inside loop, in shape described
633 get_biv_step_1 (df_ref def
, scalar_int_mode outer_mode
, rtx reg
,
634 rtx
*inner_step
, scalar_int_mode
*inner_mode
,
635 enum iv_extend_code
*extend
,
638 rtx set
, rhs
, op0
= NULL_RTX
, op1
= NULL_RTX
;
640 enum rtx_code code
, prev_code
= UNKNOWN
;
641 rtx_insn
*insn
= DF_REF_INSN (def
);
643 enum iv_grd_result res
;
645 set
= single_set (insn
);
649 rhs
= find_reg_equal_equiv_note (insn
);
655 code
= GET_CODE (rhs
);
668 if (code
== PLUS
&& CONSTANT_P (op0
))
669 std::swap (op0
, op1
);
671 if (!simple_reg_p (op0
)
672 || !CONSTANT_P (op1
))
675 if (GET_MODE (rhs
) != outer_mode
)
677 /* ppc64 uses expressions like
679 (set x:SI (plus:SI (subreg:SI y:DI) 1)).
681 this is equivalent to
683 (set x':DI (plus:DI y:DI 1))
684 (set x:SI (subreg:SI (x':DI)). */
685 if (GET_CODE (op0
) != SUBREG
)
687 if (GET_MODE (SUBREG_REG (op0
)) != outer_mode
)
696 if (GET_MODE (rhs
) != outer_mode
)
701 /* rv64 wraps SImode arithmetic inside an extension to DImode.
702 This matches the actual hardware semantics. So peek inside
703 the extension and see if we have simple arithmetic that we
705 if (GET_CODE (op0
) == PLUS
)
711 if (CONSTANT_P (op0
))
712 std::swap (op0
, op1
);
714 if (!simple_reg_p (op0
) || !CONSTANT_P (op1
))
721 if (!simple_reg_p (op0
))
731 if (GET_CODE (next
) == SUBREG
)
733 if (!subreg_lowpart_p (next
))
736 nextr
= SUBREG_REG (next
);
737 if (GET_MODE (nextr
) != outer_mode
)
743 res
= iv_get_reaching_def (insn
, nextr
, &next_def
);
745 if (res
== GRD_INVALID
|| res
== GRD_INVARIANT
)
748 if (res
== GRD_MAYBE_BIV
)
750 if (!rtx_equal_p (nextr
, reg
))
753 *inner_step
= const0_rtx
;
754 *extend
= IV_UNKNOWN_EXTEND
;
755 *inner_mode
= outer_mode
;
756 *outer_step
= const0_rtx
;
758 else if (!get_biv_step_1 (next_def
, outer_mode
, reg
,
759 inner_step
, inner_mode
, extend
,
763 if (GET_CODE (next
) == SUBREG
)
765 scalar_int_mode amode
;
766 if (!is_a
<scalar_int_mode
> (GET_MODE (next
), &amode
)
767 || GET_MODE_SIZE (amode
) > GET_MODE_SIZE (*inner_mode
))
771 *inner_step
= simplify_gen_binary (PLUS
, outer_mode
,
772 *inner_step
, *outer_step
);
773 *outer_step
= const0_rtx
;
774 *extend
= IV_UNKNOWN_EXTEND
;
785 if (*inner_mode
== outer_mode
786 /* See comment in previous switch. */
787 || GET_MODE (rhs
) != outer_mode
)
788 *inner_step
= simplify_gen_binary (code
, outer_mode
,
791 *outer_step
= simplify_gen_binary (code
, outer_mode
,
794 if (prev_code
== SIGN_EXTEND
)
795 *extend
= IV_SIGN_EXTEND
;
796 else if (prev_code
== ZERO_EXTEND
)
797 *extend
= IV_ZERO_EXTEND
;
802 gcc_assert (GET_MODE (op0
) == *inner_mode
803 && *extend
== IV_UNKNOWN_EXTEND
804 && *outer_step
== const0_rtx
);
806 *extend
= (code
== SIGN_EXTEND
) ? IV_SIGN_EXTEND
: IV_ZERO_EXTEND
;
816 /* Gets the operation on register REG inside loop, in shape
818 OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP))
820 If the operation cannot be described in this shape, return false.
821 LAST_DEF is the definition of REG that dominates loop latch. */
824 get_biv_step (df_ref last_def
, scalar_int_mode outer_mode
, rtx reg
,
825 rtx
*inner_step
, scalar_int_mode
*inner_mode
,
826 enum iv_extend_code
*extend
, rtx
*outer_step
)
828 if (!get_biv_step_1 (last_def
, outer_mode
, reg
,
829 inner_step
, inner_mode
, extend
,
833 gcc_assert ((*inner_mode
== outer_mode
) != (*extend
!= IV_UNKNOWN_EXTEND
));
834 gcc_assert (*inner_mode
!= outer_mode
|| *outer_step
== const0_rtx
);
839 /* Records information that DEF is induction variable IV. */
842 record_iv (df_ref def
, class rtx_iv
*iv
)
844 class rtx_iv
*recorded_iv
= XNEW (class rtx_iv
);
847 check_iv_ref_table_size ();
848 DF_REF_IV_SET (def
, recorded_iv
);
851 /* If DEF was already analyzed for bivness, store the description of the biv to
852 IV and return true. Otherwise return false. */
855 analyzed_for_bivness_p (rtx def
, class rtx_iv
*iv
)
857 class biv_entry
*biv
= bivs
->find_with_hash (def
, REGNO (def
));
867 record_biv (rtx def
, class rtx_iv
*iv
)
869 class biv_entry
*biv
= XNEW (class biv_entry
);
870 biv_entry
**slot
= bivs
->find_slot_with_hash (def
, REGNO (def
), INSERT
);
872 biv
->regno
= REGNO (def
);
878 /* Determines whether DEF is a biv and if so, stores its description
879 to *IV. OUTER_MODE is the mode of DEF. */
882 iv_analyze_biv (scalar_int_mode outer_mode
, rtx def
, class rtx_iv
*iv
)
884 rtx inner_step
, outer_step
;
885 scalar_int_mode inner_mode
;
886 enum iv_extend_code extend
;
891 fprintf (dump_file
, "Analyzing ");
892 print_rtl (dump_file
, def
);
893 fprintf (dump_file
, " for bivness.\n");
898 if (!CONSTANT_P (def
))
901 return iv_constant (iv
, outer_mode
, def
);
904 if (!latch_dominating_def (def
, &last_def
))
907 fprintf (dump_file
, " not simple.\n");
912 return iv_constant (iv
, outer_mode
, def
);
914 if (analyzed_for_bivness_p (def
, iv
))
917 fprintf (dump_file
, " already analysed.\n");
918 return iv
->base
!= NULL_RTX
;
921 if (!get_biv_step (last_def
, outer_mode
, def
, &inner_step
, &inner_mode
,
922 &extend
, &outer_step
))
928 /* Loop transforms base to es (base + inner_step) + outer_step,
929 where es means extend of subreg between inner_mode and outer_mode.
930 The corresponding induction variable is
932 es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step */
934 iv
->base
= simplify_gen_binary (MINUS
, outer_mode
, def
, outer_step
);
935 iv
->step
= simplify_gen_binary (PLUS
, outer_mode
, inner_step
, outer_step
);
936 iv
->mode
= inner_mode
;
937 iv
->extend_mode
= outer_mode
;
939 iv
->mult
= const1_rtx
;
940 iv
->delta
= outer_step
;
941 iv
->first_special
= inner_mode
!= outer_mode
;
946 fprintf (dump_file
, " ");
947 dump_iv_info (dump_file
, iv
);
948 fprintf (dump_file
, "\n");
951 record_biv (def
, iv
);
952 return iv
->base
!= NULL_RTX
;
955 /* Analyzes expression RHS used at INSN and stores the result to *IV.
956 The mode of the induction variable is MODE. */
959 iv_analyze_expr (rtx_insn
*insn
, scalar_int_mode mode
, rtx rhs
,
963 rtx op0
= NULL_RTX
, op1
= NULL_RTX
;
964 class rtx_iv iv0
, iv1
;
965 enum rtx_code code
= GET_CODE (rhs
);
966 scalar_int_mode omode
= mode
;
971 gcc_assert (GET_MODE (rhs
) == mode
|| GET_MODE (rhs
) == VOIDmode
);
976 return iv_analyze_op (insn
, mode
, rhs
, iv
);
988 /* We don't know how many bits there are in a sign-extended constant. */
989 if (!is_a
<scalar_int_mode
> (GET_MODE (op0
), &omode
))
1000 op0
= XEXP (rhs
, 0);
1001 mby
= XEXP (rhs
, 1);
1002 if (!CONSTANT_P (mby
))
1003 std::swap (op0
, mby
);
1004 if (!CONSTANT_P (mby
))
1009 op0
= XEXP (rhs
, 0);
1010 mby
= XEXP (rhs
, 1);
1011 if (!CONSTANT_P (mby
))
1020 && !iv_analyze_expr (insn
, omode
, op0
, &iv0
))
1024 && !iv_analyze_expr (insn
, omode
, op1
, &iv1
))
1030 if (!iv_extend (&iv0
, IV_SIGN_EXTEND
, mode
))
1035 if (!iv_extend (&iv0
, IV_ZERO_EXTEND
, mode
))
1046 if (!iv_add (&iv0
, &iv1
, code
))
1051 if (!iv_mult (&iv0
, mby
))
1056 if (!iv_shift (&iv0
, mby
))
1065 return iv
->base
!= NULL_RTX
;
1068 /* Analyzes iv DEF and stores the result to *IV. */
1071 iv_analyze_def (df_ref def
, class rtx_iv
*iv
)
1073 rtx_insn
*insn
= DF_REF_INSN (def
);
1074 rtx reg
= DF_REF_REG (def
);
1079 fprintf (dump_file
, "Analyzing def of ");
1080 print_rtl (dump_file
, reg
);
1081 fprintf (dump_file
, " in insn ");
1082 print_rtl_single (dump_file
, insn
);
1085 check_iv_ref_table_size ();
1086 if (DF_REF_IV (def
))
1089 fprintf (dump_file
, " already analysed.\n");
1090 *iv
= *DF_REF_IV (def
);
1091 return iv
->base
!= NULL_RTX
;
1094 iv
->base
= NULL_RTX
;
1095 iv
->step
= NULL_RTX
;
1097 scalar_int_mode mode
;
1098 if (!REG_P (reg
) || !is_a
<scalar_int_mode
> (GET_MODE (reg
), &mode
))
1101 set
= single_set (insn
);
1105 if (!REG_P (SET_DEST (set
)))
1108 gcc_assert (SET_DEST (set
) == reg
);
1109 rhs
= find_reg_equal_equiv_note (insn
);
1111 rhs
= XEXP (rhs
, 0);
1113 rhs
= SET_SRC (set
);
1115 iv_analyze_expr (insn
, mode
, rhs
, iv
);
1116 record_iv (def
, iv
);
1120 print_rtl (dump_file
, reg
);
1121 fprintf (dump_file
, " in insn ");
1122 print_rtl_single (dump_file
, insn
);
1123 fprintf (dump_file
, " is ");
1124 dump_iv_info (dump_file
, iv
);
1125 fprintf (dump_file
, "\n");
1128 return iv
->base
!= NULL_RTX
;
1131 /* Analyzes operand OP of INSN and stores the result to *IV. MODE is the
1135 iv_analyze_op (rtx_insn
*insn
, scalar_int_mode mode
, rtx op
, class rtx_iv
*iv
)
1138 enum iv_grd_result res
;
1142 fprintf (dump_file
, "Analyzing operand ");
1143 print_rtl (dump_file
, op
);
1144 fprintf (dump_file
, " of insn ");
1145 print_rtl_single (dump_file
, insn
);
1148 if (function_invariant_p (op
))
1149 res
= GRD_INVARIANT
;
1150 else if (GET_CODE (op
) == SUBREG
)
1152 scalar_int_mode inner_mode
;
1153 if (!subreg_lowpart_p (op
)
1154 || !is_a
<scalar_int_mode
> (GET_MODE (SUBREG_REG (op
)), &inner_mode
))
1157 if (!iv_analyze_op (insn
, inner_mode
, SUBREG_REG (op
), iv
))
1160 return iv_subreg (iv
, mode
);
1164 res
= iv_get_reaching_def (insn
, op
, &def
);
1165 if (res
== GRD_INVALID
)
1168 fprintf (dump_file
, " not simple.\n");
1173 if (res
== GRD_INVARIANT
)
1175 iv_constant (iv
, mode
, op
);
1179 fprintf (dump_file
, " ");
1180 dump_iv_info (dump_file
, iv
);
1181 fprintf (dump_file
, "\n");
1186 if (res
== GRD_MAYBE_BIV
)
1187 return iv_analyze_biv (mode
, op
, iv
);
1189 return iv_analyze_def (def
, iv
);
1192 /* Analyzes value VAL at INSN and stores the result to *IV. MODE is the
1196 iv_analyze (rtx_insn
*insn
, scalar_int_mode mode
, rtx val
, class rtx_iv
*iv
)
1200 /* We must find the insn in that val is used, so that we get to UD chains.
1201 Since the function is sometimes called on result of get_condition,
1202 this does not necessarily have to be directly INSN; scan also the
1204 if (simple_reg_p (val
))
1206 if (GET_CODE (val
) == SUBREG
)
1207 reg
= SUBREG_REG (val
);
1211 while (!df_find_use (insn
, reg
))
1212 insn
= NEXT_INSN (insn
);
1215 return iv_analyze_op (insn
, mode
, val
, iv
);
1218 /* Analyzes definition of DEF in INSN and stores the result to IV. */
1221 iv_analyze_result (rtx_insn
*insn
, rtx def
, class rtx_iv
*iv
)
1225 adef
= df_find_def (insn
, def
);
1229 return iv_analyze_def (adef
, iv
);
1232 /* Checks whether definition of register REG in INSN is a basic induction
1233 variable. MODE is the mode of REG.
1235 IV analysis must have been initialized (via a call to
1236 iv_analysis_loop_init) for this function to produce a result. */
1239 biv_p (rtx_insn
*insn
, scalar_int_mode mode
, rtx reg
)
1242 df_ref def
, last_def
;
1244 if (!simple_reg_p (reg
))
1247 def
= df_find_def (insn
, reg
);
1248 gcc_assert (def
!= NULL
);
1249 if (!latch_dominating_def (reg
, &last_def
))
1251 if (last_def
!= def
)
1254 if (!iv_analyze_biv (mode
, reg
, &iv
))
1257 return iv
.step
!= const0_rtx
;
1260 /* Calculates value of IV at ITERATION-th iteration. */
1263 get_iv_value (class rtx_iv
*iv
, rtx iteration
)
1267 /* We would need to generate some if_then_else patterns, and so far
1268 it is not needed anywhere. */
1269 gcc_assert (!iv
->first_special
);
1271 if (iv
->step
!= const0_rtx
&& iteration
!= const0_rtx
)
1272 val
= simplify_gen_binary (PLUS
, iv
->extend_mode
, iv
->base
,
1273 simplify_gen_binary (MULT
, iv
->extend_mode
,
1274 iv
->step
, iteration
));
1278 if (iv
->extend_mode
== iv
->mode
)
1281 val
= lowpart_subreg (iv
->mode
, val
, iv
->extend_mode
);
1283 if (iv
->extend
== IV_UNKNOWN_EXTEND
)
1286 val
= simplify_gen_unary (iv_extend_to_rtx_code (iv
->extend
),
1287 iv
->extend_mode
, val
, iv
->mode
);
1288 val
= simplify_gen_binary (PLUS
, iv
->extend_mode
, iv
->delta
,
1289 simplify_gen_binary (MULT
, iv
->extend_mode
,
1295 /* Free the data for an induction variable analysis. */
1298 iv_analysis_done (void)
1304 df_finish_pass (true);
1307 free (iv_ref_table
);
1308 iv_ref_table
= NULL
;
1309 iv_ref_table_size
= 0;
1313 /* Computes inverse to X modulo (1 << MOD). */
1316 inverse (uint64_t x
, int mod
)
1319 ((uint64_t) 1 << (mod
- 1) << 1) - 1;
1323 for (i
= 0; i
< mod
- 1; i
++)
1325 rslt
= (rslt
* x
) & mask
;
1332 /* Checks whether any register in X is in set ALT. */
1335 altered_reg_used (const_rtx x
, bitmap alt
)
1337 subrtx_iterator::array_type array
;
1338 FOR_EACH_SUBRTX (iter
, array
, x
, NONCONST
)
1340 const_rtx x
= *iter
;
1341 if (REG_P (x
) && REGNO_REG_SET_P (alt
, REGNO (x
)))
1347 /* Marks registers altered by EXPR in set ALT. */
1350 mark_altered (rtx expr
, const_rtx by ATTRIBUTE_UNUSED
, void *alt
)
1352 if (GET_CODE (expr
) == SUBREG
)
1353 expr
= SUBREG_REG (expr
);
1357 SET_REGNO_REG_SET ((bitmap
) alt
, REGNO (expr
));
1360 /* Checks whether RHS is simple enough to process. */
1363 simple_rhs_p (rtx rhs
)
1367 if (function_invariant_p (rhs
)
1368 || (REG_P (rhs
) && !HARD_REGISTER_P (rhs
)))
1371 switch (GET_CODE (rhs
))
1376 op0
= XEXP (rhs
, 0);
1377 op1
= XEXP (rhs
, 1);
1378 /* Allow reg OP const and reg OP reg. */
1379 if (!(REG_P (op0
) && !HARD_REGISTER_P (op0
))
1380 && !function_invariant_p (op0
))
1382 if (!(REG_P (op1
) && !HARD_REGISTER_P (op1
))
1383 && !function_invariant_p (op1
))
1392 op0
= XEXP (rhs
, 0);
1393 op1
= XEXP (rhs
, 1);
1394 /* Allow reg OP const. */
1395 if (!(REG_P (op0
) && !HARD_REGISTER_P (op0
)))
1397 if (!function_invariant_p (op1
))
1407 /* If any registers in *EXPR that have a single definition, try to replace
1408 them with the known-equivalent values. */
1411 replace_single_def_regs (rtx
*expr
)
1413 subrtx_var_iterator::array_type array
;
1415 FOR_EACH_SUBRTX_VAR (iter
, array
, *expr
, NONCONST
)
1419 if (rtx new_x
= df_find_single_def_src (x
))
1421 *expr
= simplify_replace_rtx (*expr
, x
, new_x
);
1427 /* A subroutine of simplify_using_initial_values, this function examines INSN
1428 to see if it contains a suitable set that we can use to make a replacement.
1429 If it is suitable, return true and set DEST and SRC to the lhs and rhs of
1430 the set; return false otherwise. */
1433 suitable_set_for_replacement (rtx_insn
*insn
, rtx
*dest
, rtx
*src
)
1435 rtx set
= single_set (insn
);
1436 rtx lhs
= NULL_RTX
, rhs
;
1441 lhs
= SET_DEST (set
);
1445 rhs
= find_reg_equal_equiv_note (insn
);
1447 rhs
= XEXP (rhs
, 0);
1449 rhs
= SET_SRC (set
);
1451 if (!simple_rhs_p (rhs
))
1459 /* Using the data returned by suitable_set_for_replacement, replace DEST
1460 with SRC in *EXPR and return the new expression. Also call
1461 replace_single_def_regs if the replacement changed something. */
1463 replace_in_expr (rtx
*expr
, rtx dest
, rtx src
)
1466 *expr
= simplify_replace_rtx (*expr
, dest
, src
);
1469 replace_single_def_regs (expr
);
1472 /* Checks whether A implies B. */
1475 implies_p (rtx a
, rtx b
)
1477 rtx op0
, op1
, opb0
, opb1
;
1480 if (rtx_equal_p (a
, b
))
1483 if (GET_CODE (a
) == EQ
)
1489 || (GET_CODE (op0
) == SUBREG
1490 && REG_P (SUBREG_REG (op0
))))
1492 rtx r
= simplify_replace_rtx (b
, op0
, op1
);
1493 if (r
== const_true_rtx
)
1498 || (GET_CODE (op1
) == SUBREG
1499 && REG_P (SUBREG_REG (op1
))))
1501 rtx r
= simplify_replace_rtx (b
, op1
, op0
);
1502 if (r
== const_true_rtx
)
1507 if (b
== const_true_rtx
)
1510 if ((GET_RTX_CLASS (GET_CODE (a
)) != RTX_COMM_COMPARE
1511 && GET_RTX_CLASS (GET_CODE (a
)) != RTX_COMPARE
)
1512 || (GET_RTX_CLASS (GET_CODE (b
)) != RTX_COMM_COMPARE
1513 && GET_RTX_CLASS (GET_CODE (b
)) != RTX_COMPARE
))
1521 mode
= GET_MODE (op0
);
1522 if (mode
!= GET_MODE (opb0
))
1524 else if (mode
== VOIDmode
)
1526 mode
= GET_MODE (op1
);
1527 if (mode
!= GET_MODE (opb1
))
1531 /* A < B implies A + 1 <= B. */
1532 if ((GET_CODE (a
) == GT
|| GET_CODE (a
) == LT
)
1533 && (GET_CODE (b
) == GE
|| GET_CODE (b
) == LE
))
1536 if (GET_CODE (a
) == GT
)
1537 std::swap (op0
, op1
);
1539 if (GET_CODE (b
) == GE
)
1540 std::swap (opb0
, opb1
);
1542 if (SCALAR_INT_MODE_P (mode
)
1543 && rtx_equal_p (op1
, opb1
)
1544 && simplify_gen_binary (MINUS
, mode
, opb0
, op0
) == const1_rtx
)
1549 /* A < B or A > B imply A != B. TODO: Likewise
1550 A + n < B implies A != B + n if neither wraps. */
1551 if (GET_CODE (b
) == NE
1552 && (GET_CODE (a
) == GT
|| GET_CODE (a
) == GTU
1553 || GET_CODE (a
) == LT
|| GET_CODE (a
) == LTU
))
1555 if (rtx_equal_p (op0
, opb0
)
1556 && rtx_equal_p (op1
, opb1
))
1560 /* For unsigned comparisons, A != 0 implies A > 0 and A >= 1. */
1561 if (GET_CODE (a
) == NE
1562 && op1
== const0_rtx
)
1564 if ((GET_CODE (b
) == GTU
1565 && opb1
== const0_rtx
)
1566 || (GET_CODE (b
) == GEU
1567 && opb1
== const1_rtx
))
1568 return rtx_equal_p (op0
, opb0
);
1571 /* A != N is equivalent to A - (N + 1) <u -1. */
1572 if (GET_CODE (a
) == NE
1573 && CONST_INT_P (op1
)
1574 && GET_CODE (b
) == LTU
1575 && opb1
== constm1_rtx
1576 && GET_CODE (opb0
) == PLUS
1577 && CONST_INT_P (XEXP (opb0
, 1))
1578 /* Avoid overflows. */
1579 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1580 != ((unsigned HOST_WIDE_INT
)1
1581 << (HOST_BITS_PER_WIDE_INT
- 1)) - 1)
1582 && INTVAL (XEXP (opb0
, 1)) + 1 == -INTVAL (op1
))
1583 return rtx_equal_p (op0
, XEXP (opb0
, 0));
1585 /* Likewise, A != N implies A - N > 0. */
1586 if (GET_CODE (a
) == NE
1587 && CONST_INT_P (op1
))
1589 if (GET_CODE (b
) == GTU
1590 && GET_CODE (opb0
) == PLUS
1591 && opb1
== const0_rtx
1592 && CONST_INT_P (XEXP (opb0
, 1))
1593 /* Avoid overflows. */
1594 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1595 != (HOST_WIDE_INT_1U
<< (HOST_BITS_PER_WIDE_INT
- 1)))
1596 && rtx_equal_p (XEXP (opb0
, 0), op0
))
1597 return INTVAL (op1
) == -INTVAL (XEXP (opb0
, 1));
1598 if (GET_CODE (b
) == GEU
1599 && GET_CODE (opb0
) == PLUS
1600 && opb1
== const1_rtx
1601 && CONST_INT_P (XEXP (opb0
, 1))
1602 /* Avoid overflows. */
1603 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (opb0
, 1))
1604 != (HOST_WIDE_INT_1U
<< (HOST_BITS_PER_WIDE_INT
- 1)))
1605 && rtx_equal_p (XEXP (opb0
, 0), op0
))
1606 return INTVAL (op1
) == -INTVAL (XEXP (opb0
, 1));
1609 /* A >s X, where X is positive, implies A <u Y, if Y is negative. */
1610 if ((GET_CODE (a
) == GT
|| GET_CODE (a
) == GE
)
1611 && CONST_INT_P (op1
)
1612 && ((GET_CODE (a
) == GT
&& op1
== constm1_rtx
)
1613 || INTVAL (op1
) >= 0)
1614 && GET_CODE (b
) == LTU
1615 && CONST_INT_P (opb1
)
1616 && rtx_equal_p (op0
, opb0
))
1617 return INTVAL (opb1
) < 0;
1622 /* Canonicalizes COND so that
1624 (1) Ensure that operands are ordered according to
1625 swap_commutative_operands_p.
1626 (2) (LE x const) will be replaced with (LT x <const+1>) and similarly
1627 for GE, GEU, and LEU. */
1630 canon_condition (rtx cond
)
1636 code
= GET_CODE (cond
);
1637 op0
= XEXP (cond
, 0);
1638 op1
= XEXP (cond
, 1);
1640 if (swap_commutative_operands_p (op0
, op1
))
1642 code
= swap_condition (code
);
1643 std::swap (op0
, op1
);
1646 mode
= GET_MODE (op0
);
1647 if (mode
== VOIDmode
)
1648 mode
= GET_MODE (op1
);
1649 gcc_assert (mode
!= VOIDmode
);
1651 if (CONST_SCALAR_INT_P (op1
) && GET_MODE_CLASS (mode
) != MODE_CC
)
1653 rtx_mode_t
const_val (op1
, mode
);
1658 if (wi::ne_p (const_val
, wi::max_value (mode
, SIGNED
)))
1661 op1
= immed_wide_int_const (wi::add (const_val
, 1), mode
);
1666 if (wi::ne_p (const_val
, wi::min_value (mode
, SIGNED
)))
1669 op1
= immed_wide_int_const (wi::sub (const_val
, 1), mode
);
1674 if (wi::ne_p (const_val
, -1))
1677 op1
= immed_wide_int_const (wi::add (const_val
, 1), mode
);
1682 if (wi::ne_p (const_val
, 0))
1685 op1
= immed_wide_int_const (wi::sub (const_val
, 1), mode
);
1694 if (op0
!= XEXP (cond
, 0)
1695 || op1
!= XEXP (cond
, 1)
1696 || code
!= GET_CODE (cond
)
1697 || GET_MODE (cond
) != SImode
)
1698 cond
= gen_rtx_fmt_ee (code
, SImode
, op0
, op1
);
1703 /* Reverses CONDition; returns NULL if we cannot. */
1706 reversed_condition (rtx cond
)
1708 enum rtx_code reversed
;
1709 reversed
= reversed_comparison_code (cond
, NULL
);
1710 if (reversed
== UNKNOWN
)
1713 return gen_rtx_fmt_ee (reversed
,
1714 GET_MODE (cond
), XEXP (cond
, 0),
1718 /* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the
1719 set of altered regs. */
1722 simplify_using_condition (rtx cond
, rtx
*expr
, regset altered
)
1724 rtx rev
, reve
, exp
= *expr
;
1726 /* If some register gets altered later, we do not really speak about its
1727 value at the time of comparison. */
1728 if (altered
&& altered_reg_used (cond
, altered
))
1731 if (GET_CODE (cond
) == EQ
1732 && REG_P (XEXP (cond
, 0)) && CONSTANT_P (XEXP (cond
, 1)))
1734 *expr
= simplify_replace_rtx (*expr
, XEXP (cond
, 0), XEXP (cond
, 1));
1738 if (!COMPARISON_P (exp
))
1741 rev
= reversed_condition (cond
);
1742 reve
= reversed_condition (exp
);
1744 cond
= canon_condition (cond
);
1745 exp
= canon_condition (exp
);
1747 rev
= canon_condition (rev
);
1749 reve
= canon_condition (reve
);
1751 if (rtx_equal_p (exp
, cond
))
1753 *expr
= const_true_rtx
;
1757 if (rev
&& rtx_equal_p (exp
, rev
))
1763 if (implies_p (cond
, exp
))
1765 *expr
= const_true_rtx
;
1769 if (reve
&& implies_p (cond
, reve
))
1775 /* A proof by contradiction. If *EXPR implies (not cond), *EXPR must
1777 if (rev
&& implies_p (exp
, rev
))
1783 /* Similarly, If (not *EXPR) implies (not cond), *EXPR must be true. */
1784 if (rev
&& reve
&& implies_p (reve
, rev
))
1786 *expr
= const_true_rtx
;
1790 /* We would like to have some other tests here. TODO. */
1795 /* Use relationship between A and *B to eventually eliminate *B.
1796 OP is the operation we consider. */
1799 eliminate_implied_condition (enum rtx_code op
, rtx a
, rtx
*b
)
1804 /* If A implies *B, we may replace *B by true. */
1805 if (implies_p (a
, *b
))
1806 *b
= const_true_rtx
;
1810 /* If *B implies A, we may replace *B by false. */
1811 if (implies_p (*b
, a
))
1820 /* Eliminates the conditions in TAIL that are implied by HEAD. OP is the
1821 operation we consider. */
1824 eliminate_implied_conditions (enum rtx_code op
, rtx
*head
, rtx tail
)
1828 for (elt
= tail
; elt
; elt
= XEXP (elt
, 1))
1829 eliminate_implied_condition (op
, *head
, &XEXP (elt
, 0));
1830 for (elt
= tail
; elt
; elt
= XEXP (elt
, 1))
1831 eliminate_implied_condition (op
, XEXP (elt
, 0), head
);
1834 /* Simplifies *EXPR using initial values at the start of the LOOP. If *EXPR
1835 is a list, its elements are assumed to be combined using OP. */
1838 simplify_using_initial_values (class loop
*loop
, enum rtx_code op
, rtx
*expr
)
1840 bool expression_valid
;
1841 rtx head
, tail
, last_valid_expr
;
1842 rtx_expr_list
*cond_list
;
1845 regset altered
, this_altered
;
1851 if (CONSTANT_P (*expr
))
1854 if (GET_CODE (*expr
) == EXPR_LIST
)
1856 head
= XEXP (*expr
, 0);
1857 tail
= XEXP (*expr
, 1);
1859 eliminate_implied_conditions (op
, &head
, tail
);
1864 neutral
= const_true_rtx
;
1869 neutral
= const0_rtx
;
1870 aggr
= const_true_rtx
;
1877 simplify_using_initial_values (loop
, UNKNOWN
, &head
);
1880 XEXP (*expr
, 0) = aggr
;
1881 XEXP (*expr
, 1) = NULL_RTX
;
1884 else if (head
== neutral
)
1887 simplify_using_initial_values (loop
, op
, expr
);
1890 simplify_using_initial_values (loop
, op
, &tail
);
1892 if (tail
&& XEXP (tail
, 0) == aggr
)
1898 XEXP (*expr
, 0) = head
;
1899 XEXP (*expr
, 1) = tail
;
1903 gcc_assert (op
== UNKNOWN
);
1905 replace_single_def_regs (expr
);
1906 if (CONSTANT_P (*expr
))
1909 e
= loop_preheader_edge (loop
);
1910 if (e
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
1913 altered
= ALLOC_REG_SET (®_obstack
);
1914 this_altered
= ALLOC_REG_SET (®_obstack
);
1916 expression_valid
= true;
1917 last_valid_expr
= *expr
;
1921 insn
= BB_END (e
->src
);
1922 if (any_condjump_p (insn
) && onlyjump_p (insn
))
1924 rtx cond
= get_condition (BB_END (e
->src
), NULL
, false, true);
1926 if (cond
&& (e
->flags
& EDGE_FALLTHRU
))
1927 cond
= reversed_condition (cond
);
1931 simplify_using_condition (cond
, expr
, altered
);
1935 if (CONSTANT_P (*expr
))
1937 for (note
= cond_list
; note
; note
= XEXP (note
, 1))
1939 simplify_using_condition (XEXP (note
, 0), expr
, altered
);
1940 if (CONSTANT_P (*expr
))
1944 cond_list
= alloc_EXPR_LIST (0, cond
, cond_list
);
1948 FOR_BB_INSNS_REVERSE (e
->src
, insn
)
1956 CLEAR_REG_SET (this_altered
);
1957 note_stores (insn
, mark_altered
, this_altered
);
1960 /* Kill all registers that might be clobbered by the call.
1961 We don't track modes of hard registers, so we need to be
1962 conservative and assume that partial kills are full kills. */
1963 function_abi callee_abi
= insn_callee_abi (insn
);
1964 IOR_REG_SET_HRS (this_altered
,
1965 callee_abi
.full_and_partial_reg_clobbers ());
1968 if (suitable_set_for_replacement (insn
, &dest
, &src
))
1970 rtx_expr_list
**pnote
, **pnote_next
;
1972 replace_in_expr (expr
, dest
, src
);
1973 if (CONSTANT_P (*expr
))
1976 for (pnote
= &cond_list
; *pnote
; pnote
= pnote_next
)
1978 rtx_expr_list
*note
= *pnote
;
1979 rtx old_cond
= XEXP (note
, 0);
1981 pnote_next
= (rtx_expr_list
**)&XEXP (note
, 1);
1982 replace_in_expr (&XEXP (note
, 0), dest
, src
);
1984 /* We can no longer use a condition that has been simplified
1985 to a constant, and simplify_using_condition will abort if
1987 if (CONSTANT_P (XEXP (note
, 0)))
1989 *pnote
= *pnote_next
;
1991 free_EXPR_LIST_node (note
);
1993 /* Retry simplifications with this condition if either the
1994 expression or the condition changed. */
1995 else if (old_cond
!= XEXP (note
, 0) || old
!= *expr
)
1996 simplify_using_condition (XEXP (note
, 0), expr
, altered
);
2001 rtx_expr_list
**pnote
, **pnote_next
;
2003 /* If we did not use this insn to make a replacement, any overlap
2004 between stores in this insn and our expression will cause the
2005 expression to become invalid. */
2006 if (altered_reg_used (*expr
, this_altered
))
2009 /* Likewise for the conditions. */
2010 for (pnote
= &cond_list
; *pnote
; pnote
= pnote_next
)
2012 rtx_expr_list
*note
= *pnote
;
2013 rtx old_cond
= XEXP (note
, 0);
2015 pnote_next
= (rtx_expr_list
**)&XEXP (note
, 1);
2016 if (altered_reg_used (old_cond
, this_altered
))
2018 *pnote
= *pnote_next
;
2020 free_EXPR_LIST_node (note
);
2025 if (CONSTANT_P (*expr
))
2028 IOR_REG_SET (altered
, this_altered
);
2030 /* If the expression now contains regs that have been altered, we
2031 can't return it to the caller. However, it is still valid for
2032 further simplification, so keep searching to see if we can
2033 eventually turn it into a constant. */
2034 if (altered_reg_used (*expr
, altered
))
2035 expression_valid
= false;
2036 if (expression_valid
)
2037 last_valid_expr
= *expr
;
2040 if (!single_pred_p (e
->src
)
2041 || single_pred (e
->src
) == ENTRY_BLOCK_PTR_FOR_FN (cfun
))
2043 e
= single_pred_edge (e
->src
);
2047 free_EXPR_LIST_list (&cond_list
);
2048 if (!CONSTANT_P (*expr
))
2049 *expr
= last_valid_expr
;
2050 FREE_REG_SET (altered
);
2051 FREE_REG_SET (this_altered
);
2054 /* Transforms invariant IV into MODE. Adds assumptions based on the fact
2055 that IV occurs as left operands of comparison COND and its signedness
2056 is SIGNED_P to DESC. */
2059 shorten_into_mode (class rtx_iv
*iv
, scalar_int_mode mode
,
2060 enum rtx_code cond
, bool signed_p
, class niter_desc
*desc
)
2062 rtx mmin
, mmax
, cond_over
, cond_under
;
2064 get_mode_bounds (mode
, signed_p
, iv
->extend_mode
, &mmin
, &mmax
);
2065 cond_under
= simplify_gen_relational (LT
, SImode
, iv
->extend_mode
,
2067 cond_over
= simplify_gen_relational (GT
, SImode
, iv
->extend_mode
,
2076 if (cond_under
!= const0_rtx
)
2078 alloc_EXPR_LIST (0, cond_under
, desc
->infinite
);
2079 if (cond_over
!= const0_rtx
)
2080 desc
->noloop_assumptions
=
2081 alloc_EXPR_LIST (0, cond_over
, desc
->noloop_assumptions
);
2088 if (cond_over
!= const0_rtx
)
2090 alloc_EXPR_LIST (0, cond_over
, desc
->infinite
);
2091 if (cond_under
!= const0_rtx
)
2092 desc
->noloop_assumptions
=
2093 alloc_EXPR_LIST (0, cond_under
, desc
->noloop_assumptions
);
2097 if (cond_over
!= const0_rtx
)
2099 alloc_EXPR_LIST (0, cond_over
, desc
->infinite
);
2100 if (cond_under
!= const0_rtx
)
2102 alloc_EXPR_LIST (0, cond_under
, desc
->infinite
);
2110 iv
->extend
= signed_p
? IV_SIGN_EXTEND
: IV_ZERO_EXTEND
;
2113 /* Transforms IV0 and IV1 compared by COND so that they are both compared as
2114 subregs of the same mode if possible (sometimes it is necessary to add
2115 some assumptions to DESC). */
2118 canonicalize_iv_subregs (class rtx_iv
*iv0
, class rtx_iv
*iv1
,
2119 enum rtx_code cond
, class niter_desc
*desc
)
2121 scalar_int_mode comp_mode
;
2124 /* If the ivs behave specially in the first iteration, or are
2125 added/multiplied after extending, we ignore them. */
2126 if (iv0
->first_special
|| iv0
->mult
!= const1_rtx
|| iv0
->delta
!= const0_rtx
)
2128 if (iv1
->first_special
|| iv1
->mult
!= const1_rtx
|| iv1
->delta
!= const0_rtx
)
2131 /* If there is some extend, it must match signedness of the comparison. */
2136 if (iv0
->extend
== IV_ZERO_EXTEND
2137 || iv1
->extend
== IV_ZERO_EXTEND
)
2144 if (iv0
->extend
== IV_SIGN_EXTEND
2145 || iv1
->extend
== IV_SIGN_EXTEND
)
2151 if (iv0
->extend
!= IV_UNKNOWN_EXTEND
2152 && iv1
->extend
!= IV_UNKNOWN_EXTEND
2153 && iv0
->extend
!= iv1
->extend
)
2157 if (iv0
->extend
!= IV_UNKNOWN_EXTEND
)
2158 signed_p
= iv0
->extend
== IV_SIGN_EXTEND
;
2159 if (iv1
->extend
!= IV_UNKNOWN_EXTEND
)
2160 signed_p
= iv1
->extend
== IV_SIGN_EXTEND
;
2167 /* Values of both variables should be computed in the same mode. These
2168 might indeed be different, if we have comparison like
2170 (compare (subreg:SI (iv0)) (subreg:SI (iv1)))
2172 and iv0 and iv1 are both ivs iterating in SI mode, but calculated
2173 in different modes. This does not seem impossible to handle, but
2174 it hardly ever occurs in practice.
2176 The only exception is the case when one of operands is invariant.
2177 For example pentium 3 generates comparisons like
2178 (lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we
2179 definitely do not want this prevent the optimization. */
2180 comp_mode
= iv0
->extend_mode
;
2181 if (GET_MODE_BITSIZE (comp_mode
) < GET_MODE_BITSIZE (iv1
->extend_mode
))
2182 comp_mode
= iv1
->extend_mode
;
2184 if (iv0
->extend_mode
!= comp_mode
)
2186 if (iv0
->mode
!= iv0
->extend_mode
2187 || iv0
->step
!= const0_rtx
)
2190 iv0
->base
= simplify_gen_unary (signed_p
? SIGN_EXTEND
: ZERO_EXTEND
,
2191 comp_mode
, iv0
->base
, iv0
->mode
);
2192 iv0
->extend_mode
= comp_mode
;
2195 if (iv1
->extend_mode
!= comp_mode
)
2197 if (iv1
->mode
!= iv1
->extend_mode
2198 || iv1
->step
!= const0_rtx
)
2201 iv1
->base
= simplify_gen_unary (signed_p
? SIGN_EXTEND
: ZERO_EXTEND
,
2202 comp_mode
, iv1
->base
, iv1
->mode
);
2203 iv1
->extend_mode
= comp_mode
;
2206 /* Check that both ivs belong to a range of a single mode. If one of the
2207 operands is an invariant, we may need to shorten it into the common
2209 if (iv0
->mode
== iv0
->extend_mode
2210 && iv0
->step
== const0_rtx
2211 && iv0
->mode
!= iv1
->mode
)
2212 shorten_into_mode (iv0
, iv1
->mode
, cond
, signed_p
, desc
);
2214 if (iv1
->mode
== iv1
->extend_mode
2215 && iv1
->step
== const0_rtx
2216 && iv0
->mode
!= iv1
->mode
)
2217 shorten_into_mode (iv1
, iv0
->mode
, swap_condition (cond
), signed_p
, desc
);
2219 if (iv0
->mode
!= iv1
->mode
)
2222 desc
->mode
= iv0
->mode
;
2223 desc
->signed_p
= signed_p
;
2228 /* Tries to estimate the maximum number of iterations in LOOP, and return the
2229 result. This function is called from iv_number_of_iterations with
2230 a number of fields in DESC already filled in. OLD_NITER is the original
2231 expression for the number of iterations, before we tried to simplify it. */
2234 determine_max_iter (class loop
*loop
, class niter_desc
*desc
, rtx old_niter
)
2236 rtx niter
= desc
->niter_expr
;
2237 rtx mmin
, mmax
, cmp
;
2239 uint64_t andmax
= 0;
2241 /* We used to look for constant operand 0 of AND,
2242 but canonicalization should always make this impossible. */
2243 gcc_checking_assert (GET_CODE (niter
) != AND
2244 || !CONST_INT_P (XEXP (niter
, 0)));
2246 if (GET_CODE (niter
) == AND
2247 && CONST_INT_P (XEXP (niter
, 1)))
2249 andmax
= UINTVAL (XEXP (niter
, 1));
2250 niter
= XEXP (niter
, 0);
2253 get_mode_bounds (desc
->mode
, desc
->signed_p
, desc
->mode
, &mmin
, &mmax
);
2254 nmax
= UINTVAL (mmax
) - UINTVAL (mmin
);
2256 if (GET_CODE (niter
) == UDIV
)
2258 if (!CONST_INT_P (XEXP (niter
, 1)))
2260 inc
= INTVAL (XEXP (niter
, 1));
2261 niter
= XEXP (niter
, 0);
2266 /* We could use a binary search here, but for now improving the upper
2267 bound by just one eliminates one important corner case. */
2268 cmp
= simplify_gen_relational (desc
->signed_p
? LT
: LTU
, VOIDmode
,
2269 desc
->mode
, old_niter
, mmax
);
2270 simplify_using_initial_values (loop
, UNKNOWN
, &cmp
);
2271 if (cmp
== const_true_rtx
)
2276 fprintf (dump_file
, ";; improved upper bound by one.\n");
2280 nmax
= MIN (nmax
, andmax
);
2282 fprintf (dump_file
, ";; Determined upper bound %" PRId64
".\n",
2287 /* Computes number of iterations of the CONDITION in INSN in LOOP and stores
2288 the result into DESC. Very similar to determine_number_of_iterations
2289 (basically its rtl version), complicated by things like subregs. */
2292 iv_number_of_iterations (class loop
*loop
, rtx_insn
*insn
, rtx condition
,
2293 class niter_desc
*desc
)
2295 rtx op0
, op1
, delta
, step
, bound
, may_xform
, tmp
, tmp0
, tmp1
;
2296 class rtx_iv iv0
, iv1
;
2297 rtx assumption
, may_not_xform
;
2299 machine_mode nonvoid_mode
;
2300 scalar_int_mode comp_mode
;
2301 rtx mmin
, mmax
, mode_mmin
, mode_mmax
;
2302 uint64_t s
, size
, d
, inv
, max
, up
, down
;
2303 int64_t inc
, step_val
;
2304 int was_sharp
= false;
2308 /* The meaning of these assumptions is this:
2310 then the rest of information does not have to be valid
2311 if noloop_assumptions then the loop does not roll
2312 if infinite then this exit is never used */
2314 desc
->assumptions
= NULL_RTX
;
2315 desc
->noloop_assumptions
= NULL_RTX
;
2316 desc
->infinite
= NULL_RTX
;
2317 desc
->simple_p
= true;
2319 desc
->const_iter
= false;
2320 desc
->niter_expr
= NULL_RTX
;
2322 cond
= GET_CODE (condition
);
2323 gcc_assert (COMPARISON_P (condition
));
2325 nonvoid_mode
= GET_MODE (XEXP (condition
, 0));
2326 if (nonvoid_mode
== VOIDmode
)
2327 nonvoid_mode
= GET_MODE (XEXP (condition
, 1));
2328 /* The constant comparisons should be folded. */
2329 gcc_assert (nonvoid_mode
!= VOIDmode
);
2331 /* We only handle integers or pointers. */
2332 scalar_int_mode mode
;
2333 if (!is_a
<scalar_int_mode
> (nonvoid_mode
, &mode
))
2336 op0
= XEXP (condition
, 0);
2337 if (!iv_analyze (insn
, mode
, op0
, &iv0
))
2340 op1
= XEXP (condition
, 1);
2341 if (!iv_analyze (insn
, mode
, op1
, &iv1
))
2344 if (GET_MODE_BITSIZE (iv0
.extend_mode
) > HOST_BITS_PER_WIDE_INT
2345 || GET_MODE_BITSIZE (iv1
.extend_mode
) > HOST_BITS_PER_WIDE_INT
)
2348 /* Check condition and normalize it. */
2356 std::swap (iv0
, iv1
);
2357 cond
= swap_condition (cond
);
2369 /* Handle extends. This is relatively nontrivial, so we only try in some
2370 easy cases, when we can canonicalize the ivs (possibly by adding some
2371 assumptions) to shape subreg (base + i * step). This function also fills
2372 in desc->mode and desc->signed_p. */
2374 if (!canonicalize_iv_subregs (&iv0
, &iv1
, cond
, desc
))
2377 comp_mode
= iv0
.extend_mode
;
2379 size
= GET_MODE_PRECISION (mode
);
2380 get_mode_bounds (mode
, (cond
== LE
|| cond
== LT
), comp_mode
, &mmin
, &mmax
);
2381 mode_mmin
= lowpart_subreg (mode
, mmin
, comp_mode
);
2382 mode_mmax
= lowpart_subreg (mode
, mmax
, comp_mode
);
2384 if (!CONST_INT_P (iv0
.step
) || !CONST_INT_P (iv1
.step
))
2387 /* We can take care of the case of two induction variables chasing each other
2388 if the test is NE. I have never seen a loop using it, but still it is
2390 if (iv0
.step
!= const0_rtx
&& iv1
.step
!= const0_rtx
)
2395 iv0
.step
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.step
, iv1
.step
);
2396 iv1
.step
= const0_rtx
;
2399 iv0
.step
= lowpart_subreg (mode
, iv0
.step
, comp_mode
);
2400 iv1
.step
= lowpart_subreg (mode
, iv1
.step
, comp_mode
);
2402 /* This is either infinite loop or the one that ends immediately, depending
2403 on initial values. Unswitching should remove this kind of conditions. */
2404 if (iv0
.step
== const0_rtx
&& iv1
.step
== const0_rtx
)
2409 if (iv0
.step
== const0_rtx
)
2410 step_val
= -INTVAL (iv1
.step
);
2412 step_val
= INTVAL (iv0
.step
);
2414 /* Ignore loops of while (i-- < 10) type. */
2418 step_is_pow2
= !(step_val
& (step_val
- 1));
2422 /* We do not care about whether the step is power of two in this
2424 step_is_pow2
= false;
2428 /* Some more condition normalization. We must record some assumptions
2429 due to overflows. */
2434 /* We want to take care only of non-sharp relationals; this is easy,
2435 as in cases the overflow would make the transformation unsafe
2436 the loop does not roll. Seemingly it would make more sense to want
2437 to take care of sharp relationals instead, as NE is more similar to
2438 them, but the problem is that here the transformation would be more
2439 difficult due to possibly infinite loops. */
2440 if (iv0
.step
== const0_rtx
)
2442 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2443 assumption
= simplify_gen_relational (EQ
, SImode
, mode
, tmp
,
2445 if (assumption
== const_true_rtx
)
2446 goto zero_iter_simplify
;
2447 iv0
.base
= simplify_gen_binary (PLUS
, comp_mode
,
2448 iv0
.base
, const1_rtx
);
2452 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2453 assumption
= simplify_gen_relational (EQ
, SImode
, mode
, tmp
,
2455 if (assumption
== const_true_rtx
)
2456 goto zero_iter_simplify
;
2457 iv1
.base
= simplify_gen_binary (PLUS
, comp_mode
,
2458 iv1
.base
, constm1_rtx
);
2461 if (assumption
!= const0_rtx
)
2462 desc
->noloop_assumptions
=
2463 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);
2464 cond
= (cond
== LT
) ? LE
: LEU
;
2466 /* It will be useful to be able to tell the difference once more in
2467 LE -> NE reduction. */
2473 /* Take care of trivially infinite loops. */
2476 if (iv0
.step
== const0_rtx
)
2478 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2479 if (rtx_equal_p (tmp
, mode_mmin
))
2482 alloc_EXPR_LIST (0, const_true_rtx
, NULL_RTX
);
2483 /* Fill in the remaining fields somehow. */
2484 goto zero_iter_simplify
;
2489 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2490 if (rtx_equal_p (tmp
, mode_mmax
))
2493 alloc_EXPR_LIST (0, const_true_rtx
, NULL_RTX
);
2494 /* Fill in the remaining fields somehow. */
2495 goto zero_iter_simplify
;
2500 /* If we can we want to take care of NE conditions instead of size
2501 comparisons, as they are much more friendly (most importantly
2502 this takes care of special handling of loops with step 1). We can
2503 do it if we first check that upper bound is greater or equal to
2504 lower bound, their difference is constant c modulo step and that
2505 there is not an overflow. */
2508 if (iv0
.step
== const0_rtx
)
2509 step
= simplify_gen_unary (NEG
, comp_mode
, iv1
.step
, comp_mode
);
2512 step
= lowpart_subreg (mode
, step
, comp_mode
);
2513 delta
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, iv0
.base
);
2514 delta
= lowpart_subreg (mode
, delta
, comp_mode
);
2515 delta
= simplify_gen_binary (UMOD
, mode
, delta
, step
);
2516 may_xform
= const0_rtx
;
2517 may_not_xform
= const_true_rtx
;
2519 if (CONST_INT_P (delta
))
2521 if (was_sharp
&& INTVAL (delta
) == INTVAL (step
) - 1)
2523 /* A special case. We have transformed condition of type
2524 for (i = 0; i < 4; i += 4)
2526 for (i = 0; i <= 3; i += 4)
2527 obviously if the test for overflow during that transformation
2528 passed, we cannot overflow here. Most importantly any
2529 loop with sharp end condition and step 1 falls into this
2530 category, so handling this case specially is definitely
2531 worth the troubles. */
2532 may_xform
= const_true_rtx
;
2534 else if (iv0
.step
== const0_rtx
)
2536 bound
= simplify_gen_binary (PLUS
, comp_mode
, mmin
, step
);
2537 bound
= simplify_gen_binary (MINUS
, comp_mode
, bound
, delta
);
2538 bound
= lowpart_subreg (mode
, bound
, comp_mode
);
2539 tmp
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2540 may_xform
= simplify_gen_relational (cond
, SImode
, mode
,
2542 may_not_xform
= simplify_gen_relational (reverse_condition (cond
),
2548 bound
= simplify_gen_binary (MINUS
, comp_mode
, mmax
, step
);
2549 bound
= simplify_gen_binary (PLUS
, comp_mode
, bound
, delta
);
2550 bound
= lowpart_subreg (mode
, bound
, comp_mode
);
2551 tmp
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2552 may_xform
= simplify_gen_relational (cond
, SImode
, mode
,
2554 may_not_xform
= simplify_gen_relational (reverse_condition (cond
),
2560 if (may_xform
!= const0_rtx
)
2562 /* We perform the transformation always provided that it is not
2563 completely senseless. This is OK, as we would need this assumption
2564 to determine the number of iterations anyway. */
2565 if (may_xform
!= const_true_rtx
)
2567 /* If the step is a power of two and the final value we have
2568 computed overflows, the cycle is infinite. Otherwise it
2569 is nontrivial to compute the number of iterations. */
2571 desc
->infinite
= alloc_EXPR_LIST (0, may_not_xform
,
2574 desc
->assumptions
= alloc_EXPR_LIST (0, may_xform
,
2578 /* We are going to lose some information about upper bound on
2579 number of iterations in this step, so record the information
2581 inc
= INTVAL (iv0
.step
) - INTVAL (iv1
.step
);
2582 if (CONST_INT_P (iv1
.base
))
2583 up
= INTVAL (iv1
.base
);
2585 up
= INTVAL (mode_mmax
) - inc
;
2586 down
= INTVAL (CONST_INT_P (iv0
.base
)
2589 max
= (up
- down
) / inc
+ 1;
2591 && !desc
->assumptions
)
2592 record_niter_bound (loop
, max
, false, true);
2594 if (iv0
.step
== const0_rtx
)
2596 iv0
.base
= simplify_gen_binary (PLUS
, comp_mode
, iv0
.base
, delta
);
2597 iv0
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.base
, step
);
2601 iv1
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, delta
);
2602 iv1
.base
= simplify_gen_binary (PLUS
, comp_mode
, iv1
.base
, step
);
2605 tmp0
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2606 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2607 assumption
= simplify_gen_relational (reverse_condition (cond
),
2608 SImode
, mode
, tmp0
, tmp1
);
2609 if (assumption
== const_true_rtx
)
2610 goto zero_iter_simplify
;
2611 else if (assumption
!= const0_rtx
)
2612 desc
->noloop_assumptions
=
2613 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);
2618 /* Count the number of iterations. */
2621 /* Everything we do here is just arithmetics modulo size of mode. This
2622 makes us able to do more involved computations of number of iterations
2623 than in other cases. First transform the condition into shape
2624 s * i <> c, with s positive. */
2625 iv1
.base
= simplify_gen_binary (MINUS
, comp_mode
, iv1
.base
, iv0
.base
);
2626 iv0
.base
= const0_rtx
;
2627 iv0
.step
= simplify_gen_binary (MINUS
, comp_mode
, iv0
.step
, iv1
.step
);
2628 iv1
.step
= const0_rtx
;
2629 if (INTVAL (iv0
.step
) < 0)
2631 iv0
.step
= simplify_gen_unary (NEG
, comp_mode
, iv0
.step
, comp_mode
);
2632 iv1
.base
= simplify_gen_unary (NEG
, comp_mode
, iv1
.base
, comp_mode
);
2634 iv0
.step
= lowpart_subreg (mode
, iv0
.step
, comp_mode
);
2636 /* Let nsd (s, size of mode) = d. If d does not divide c, the loop
2637 is infinite. Otherwise, the number of iterations is
2638 (inverse(s/d) * (c/d)) mod (size of mode/d). */
2639 s
= INTVAL (iv0
.step
); d
= 1;
2646 bound
= gen_int_mode (((uint64_t) 1 << (size
- 1) << 1) - 1, mode
);
2648 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2649 tmp
= simplify_gen_binary (UMOD
, mode
, tmp1
, gen_int_mode (d
, mode
));
2650 assumption
= simplify_gen_relational (NE
, SImode
, mode
, tmp
, const0_rtx
);
2651 desc
->infinite
= alloc_EXPR_LIST (0, assumption
, desc
->infinite
);
2653 tmp
= simplify_gen_binary (UDIV
, mode
, tmp1
, gen_int_mode (d
, mode
));
2654 inv
= inverse (s
, size
);
2655 tmp
= simplify_gen_binary (MULT
, mode
, tmp
, gen_int_mode (inv
, mode
));
2656 desc
->niter_expr
= simplify_gen_binary (AND
, mode
, tmp
, bound
);
2660 if (iv1
.step
== const0_rtx
)
2661 /* Condition in shape a + s * i <= b
2662 We must know that b + s does not overflow and a <= b + s and then we
2663 can compute number of iterations as (b + s - a) / s. (It might
2664 seem that we in fact could be more clever about testing the b + s
2665 overflow condition using some information about b - a mod s,
2666 but it was already taken into account during LE -> NE transform). */
2669 tmp0
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2670 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2672 bound
= simplify_gen_binary (MINUS
, mode
, mode_mmax
,
2673 lowpart_subreg (mode
, step
,
2679 /* If s is power of 2, we know that the loop is infinite if
2680 a % s <= b % s and b + s overflows. */
2681 assumption
= simplify_gen_relational (reverse_condition (cond
),
2685 t0
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp0
), step
);
2686 t1
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp1
), step
);
2687 tmp
= simplify_gen_relational (cond
, SImode
, mode
, t0
, t1
);
2688 assumption
= simplify_gen_binary (AND
, SImode
, assumption
, tmp
);
2690 alloc_EXPR_LIST (0, assumption
, desc
->infinite
);
2694 assumption
= simplify_gen_relational (cond
, SImode
, mode
,
2697 alloc_EXPR_LIST (0, assumption
, desc
->assumptions
);
2700 tmp
= simplify_gen_binary (PLUS
, comp_mode
, iv1
.base
, iv0
.step
);
2701 tmp
= lowpart_subreg (mode
, tmp
, comp_mode
);
2702 assumption
= simplify_gen_relational (reverse_condition (cond
),
2703 SImode
, mode
, tmp0
, tmp
);
2705 delta
= simplify_gen_binary (PLUS
, mode
, tmp1
, step
);
2706 delta
= simplify_gen_binary (MINUS
, mode
, delta
, tmp0
);
2710 /* Condition in shape a <= b - s * i
2711 We must know that a - s does not overflow and a - s <= b and then
2712 we can again compute number of iterations as (b - (a - s)) / s. */
2713 step
= simplify_gen_unary (NEG
, mode
, iv1
.step
, mode
);
2714 tmp0
= lowpart_subreg (mode
, iv0
.base
, comp_mode
);
2715 tmp1
= lowpart_subreg (mode
, iv1
.base
, comp_mode
);
2717 bound
= simplify_gen_binary (PLUS
, mode
, mode_mmin
,
2718 lowpart_subreg (mode
, step
, comp_mode
));
2723 /* If s is power of 2, we know that the loop is infinite if
2724 a % s <= b % s and a - s overflows. */
2725 assumption
= simplify_gen_relational (reverse_condition (cond
),
2729 t0
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp0
), step
);
2730 t1
= simplify_gen_binary (UMOD
, mode
, copy_rtx (tmp1
), step
);
2731 tmp
= simplify_gen_relational (cond
, SImode
, mode
, t0
, t1
);
2732 assumption
= simplify_gen_binary (AND
, SImode
, assumption
, tmp
);
2734 alloc_EXPR_LIST (0, assumption
, desc
->infinite
);
2738 assumption
= simplify_gen_relational (cond
, SImode
, mode
,
2741 alloc_EXPR_LIST (0, assumption
, desc
->assumptions
);
2744 tmp
= simplify_gen_binary (PLUS
, comp_mode
, iv0
.base
, iv1
.step
);
2745 tmp
= lowpart_subreg (mode
, tmp
, comp_mode
);
2746 assumption
= simplify_gen_relational (reverse_condition (cond
),
2749 delta
= simplify_gen_binary (MINUS
, mode
, tmp0
, step
);
2750 delta
= simplify_gen_binary (MINUS
, mode
, tmp1
, delta
);
2752 if (assumption
== const_true_rtx
)
2753 goto zero_iter_simplify
;
2754 else if (assumption
!= const0_rtx
)
2755 desc
->noloop_assumptions
=
2756 alloc_EXPR_LIST (0, assumption
, desc
->noloop_assumptions
);
2757 delta
= simplify_gen_binary (UDIV
, mode
, delta
, step
);
2758 desc
->niter_expr
= delta
;
2761 old_niter
= desc
->niter_expr
;
2763 simplify_using_initial_values (loop
, AND
, &desc
->assumptions
);
2764 if (desc
->assumptions
2765 && XEXP (desc
->assumptions
, 0) == const0_rtx
)
2767 simplify_using_initial_values (loop
, IOR
, &desc
->noloop_assumptions
);
2768 simplify_using_initial_values (loop
, IOR
, &desc
->infinite
);
2769 simplify_using_initial_values (loop
, UNKNOWN
, &desc
->niter_expr
);
2771 /* Rerun the simplification. Consider code (created by copying loop headers)
2783 The first pass determines that i = 0, the second pass uses it to eliminate
2784 noloop assumption. */
2786 simplify_using_initial_values (loop
, AND
, &desc
->assumptions
);
2787 if (desc
->assumptions
2788 && XEXP (desc
->assumptions
, 0) == const0_rtx
)
2790 simplify_using_initial_values (loop
, IOR
, &desc
->noloop_assumptions
);
2791 simplify_using_initial_values (loop
, IOR
, &desc
->infinite
);
2792 simplify_using_initial_values (loop
, UNKNOWN
, &desc
->niter_expr
);
2794 if (desc
->noloop_assumptions
2795 && XEXP (desc
->noloop_assumptions
, 0) == const_true_rtx
)
2798 if (CONST_INT_P (desc
->niter_expr
))
2800 uint64_t val
= INTVAL (desc
->niter_expr
);
2802 desc
->const_iter
= true;
2803 desc
->niter
= val
& GET_MODE_MASK (desc
->mode
);
2805 && !desc
->assumptions
)
2806 record_niter_bound (loop
, desc
->niter
, false, true);
2810 max
= determine_max_iter (loop
, desc
, old_niter
);
2812 goto zero_iter_simplify
;
2814 && !desc
->assumptions
)
2815 record_niter_bound (loop
, max
, false, true);
2817 /* simplify_using_initial_values does a copy propagation on the registers
2818 in the expression for the number of iterations. This prolongs life
2819 ranges of registers and increases register pressure, and usually
2820 brings no gain (and if it happens to do, the cse pass will take care
2821 of it anyway). So prevent this behavior, unless it enabled us to
2822 derive that the number of iterations is a constant. */
2823 desc
->niter_expr
= old_niter
;
2829 /* Simplify the assumptions. */
2830 simplify_using_initial_values (loop
, AND
, &desc
->assumptions
);
2831 if (desc
->assumptions
2832 && XEXP (desc
->assumptions
, 0) == const0_rtx
)
2834 simplify_using_initial_values (loop
, IOR
, &desc
->infinite
);
2838 desc
->const_iter
= true;
2840 record_niter_bound (loop
, 0, true, true);
2841 desc
->noloop_assumptions
= NULL_RTX
;
2842 desc
->niter_expr
= const0_rtx
;
2846 desc
->simple_p
= false;
2850 /* Checks whether E is a simple exit from LOOP and stores its description
/* NOTE(review): this region is extraction-damaged.  The original file's
   line numbers (2850, 2854, ...) are fused into the text and the gaps in
   that numbering mean whole lines (early 'return's, braces, declarations)
   are missing between the fragments below.  Only comments were added
   here; no code token was altered.  Compare against upstream GCC
   loop-iv.cc before trusting this text.  */
/* Decides whether edge E is a "simple" exit of LOOP; on success the
   iteration description is filled into *DESC by the final call to
   iv_number_of_iterations.  Every failed check leaves desc->simple_p
   false (set below); the corresponding 'return' statements fall in the
   missing lines.  */
2854 check_simple_exit (class loop
*loop
, edge e
, class niter_desc
*desc
)
/* The source block of the exit edge.  Its initialization (presumably
   from e->src) is in a missing line -- TODO confirm upstream.  */
2856 basic_block exit_bb
;
/* Be pessimistic by default; only a fully validated exit flips this.  */
2862 desc
->simple_p
= false;
2864 /* It must belong directly to the loop. */
2865 if (exit_bb
->loop_father
!= loop
)
2868 /* It must be tested (at least) once during any iteration. */
2869 if (!dominated_by_p (CDI_DOMINATORS
, loop
->latch
, exit_bb
))
2872 /* It must end in a simple conditional jump. */
2873 if (!any_condjump_p (BB_END (exit_bb
)) || !onlyjump_p (BB_END (exit_bb
)))
/* Pick the successor edge of EXIT_BB that stays inside the loop.  The
   condition selecting between index 0 and 1 is in a missing line --
   presumably it checks which successor equals E; TODO confirm.  */
2876 ein
= EDGE_SUCC (exit_bb
, 0);
2878 ein
= EDGE_SUCC (exit_bb
, 1);
/* Record the in-loop ("stay") edge in the description.  */
2881 desc
->in_edge
= ein
;
2883 /* Test whether the condition is suitable. */
2884 if (!(condition
= get_condition (BB_END (ein
->src
), &at
, false, false)))
/* If the in-loop edge is the fall-through edge, the branch is taken to
   leave the loop, so the loop keeps iterating while the *reversed*
   condition holds.  */
2887 if (ein
->flags
& EDGE_FALLTHRU
)
2889 condition
= reversed_condition (condition
);
2894 /* Check that we are able to determine number of iterations and fill
2895 in information about it. */
2896 iv_number_of_iterations (loop
, at
, condition
, desc
)
2899 /* Finds a simple exit of LOOP and stores its description into DESC. */
/* NOTE(review): extraction-damaged region -- original line numbers are
   fused into the text and the numbering gaps (e.g. 2919-2928, the dump
   guards around 2940-2947) mean whole lines are missing.  Only comments
   were added; no code token was changed.  Verify against upstream GCC
   loop-iv.cc.  */
/* Walks every edge leaving every block of LOOP, runs check_simple_exit
   on each candidate, and keeps the "best" simple exit in *DESC
   (preference logic in the fragments below).  Dumps the result when
   dump_file is set, then fixes up finiteness for single-exit loops.  */
2902 find_simple_exit (class loop
*loop
, class niter_desc
*desc
)
/* Scratch description for the exit currently being examined.  */
2907 class niter_desc act
;
/* Assume no simple exit until one is found.  */
2911 desc
->simple_p
= false;
2912 body
= get_loop_body (loop
);
/* Scan all blocks of the loop body for outgoing edges.  */
2914 for (i
= 0; i
< loop
->num_nodes
; i
++)
2916 FOR_EACH_EDGE (e
, ei
, body
[i
]->succs
)
/* Edges whose destination stays inside the loop are not exits;
   the 'continue' for this test is in a missing line.  */
2918 if (flow_bb_inside_loop_p (loop
, e
->dest
))
2921 check_simple_exit (loop
, e
, &act
);
2929 /* Prefer constant iterations; the less the better. */
/* Fragment of the preference condition: skip ACT when the kept DESC
   already has a constant iteration count no larger than ACT's.  The
   first operand of this '||' is in a missing line.  */
2931 || (desc
->const_iter
&& act
.niter
>= desc
->niter
))
2934 /* Also if the actual exit may be infinite, while the old one
2935 not, prefer the old one. */
2936 if (act
.infinite
&& !desc
->infinite
)
/* Dump the chosen simple exit and its niter description.  The
   'if (dump_file)' / 'if (desc->simple_p)' guards for this section
   fall in missing lines.  */
2948 fprintf (dump_file
, "Loop %d is simple:\n", loop
->num
);
2949 fprintf (dump_file
, " simple exit %d -> %d\n",
2950 desc
->out_edge
->src
->index
,
2951 desc
->out_edge
->dest
->index
);
2952 if (desc
->assumptions
)
2954 fprintf (dump_file
, " assumptions: ");
2955 print_rtl (dump_file
, desc
->assumptions
);
2956 fprintf (dump_file
, "\n");
2958 if (desc
->noloop_assumptions
)
2960 fprintf (dump_file
, " does not roll if: ");
2961 print_rtl (dump_file
, desc
->noloop_assumptions
);
2962 fprintf (dump_file
, "\n");
/* The 'if (desc->infinite)' guard for this dump is in a missing line
   (original 2964-2965) -- TODO confirm.  */
2966 fprintf (dump_file
, " infinite if: ");
2967 print_rtl (dump_file
, desc
->infinite
);
2968 fprintf (dump_file
, "\n");
2971 fprintf (dump_file
, " number of iterations: ");
2972 print_rtl (dump_file
, desc
->niter_expr
);
2973 fprintf (dump_file
, "\n");
2975 fprintf (dump_file
, " upper bound: %li\n",
2976 (long)get_max_loop_iterations_int (loop
));
2977 fprintf (dump_file
, " likely upper bound: %li\n",
2978 (long)get_likely_max_loop_iterations_int (loop
));
2979 fprintf (dump_file
, " realistic bound: %li\n",
2980 (long)get_estimated_loop_iterations_int (loop
));
/* 'else' branch of the dump: no simple exit was found.  */
2983 fprintf (dump_file
, "Loop %d is not simple.\n", loop
->num
);
2986 /* Fix up the finiteness if possible. We can only do it for single exit,
2987 since the loop is finite, but it's possible that we predicate one loop
2988 exit to be finite which can not be determined as finite in middle-end as
2989 well. It results in incorrect predicate information on the exit condition
2990 expression. For example, if says [(int) _1 + -8, + , -8] != 0 finite,
2991 it means _1 can exactly divide -8. */
2992 if (desc
->infinite
&& single_exit (loop
) && finite_loop_p (loop
))
/* Drop the infiniteness assumption: the middle-end proved the
   single-exit loop finite.  */
2994 desc
->infinite
= NULL_RTX
;
/* Dump-file guard for this message is in a missing line.  */
2996 fprintf (dump_file
, " infinite updated to finite.\n");
3002 /* Creates a simple loop description of LOOP if it was not computed
/* NOTE(review): extraction-damaged -- the early-return when a cached
   description already exists (original ~3009-3012) and the trailing
   'return desc;' (~3019) are in missing lines.  Only comments added;
   no code token changed.  */
/* Lazily computes and caches the niter description for LOOP: allocate a
   zeroed descriptor in GC memory, run the iv analysis, find a simple
   exit, and stash the result on the loop.  */
3006 get_simple_loop_desc (class loop
*loop
)
/* Check the per-loop cache first.  */
3008 class niter_desc
*desc
= simple_loop_desc (loop
);
3013 /* At least desc->infinite is not always initialized by
3014 find_simple_loop_exit. */
/* ggc_cleared_alloc zero-fills, guaranteeing uninitialized fields such
   as 'infinite' start out as NULL.  */
3015 desc
= ggc_cleared_alloc
<niter_desc
> ();
/* Point the iv analysis machinery at this loop before querying it.  */
3016 iv_analysis_loop_init (loop
);
3017 find_simple_exit (loop
, desc
);
/* Cache for subsequent calls.  */
3018 loop
->simple_loop_desc
= desc
;
3022 /* Releases simple loop description for LOOP. */
3025 free_simple_loop_desc (class loop
*loop
)
3027 class niter_desc
*desc
= simple_loop_desc (loop
);
3033 loop
->simple_loop_desc
= NULL
;