1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
/* Parenthesize the macro argument so that expressions such as
   HWI_SIGN_EXTEND (a + b) cast the whole expression, not just A.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx
neg_const_int (enum machine_mode
, rtx
);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
,
56 static bool associative_constant_p (rtx
);
57 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
60 /* Negate a CONST_INT rtx, truncating (because a conversion from a
61 maximally negative number can overflow). */
63 neg_const_int (enum machine_mode mode
, rtx i
)
65 return gen_int_mode (- INTVAL (i
), mode
);
69 /* Make a binary operation by properly ordering the operands and
70 seeing if the expression folds. */
73 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
78 /* Put complex operands first and constants second if commutative. */
79 if (GET_RTX_CLASS (code
) == 'c'
80 && swap_commutative_operands_p (op0
, op1
))
81 tem
= op0
, op0
= op1
, op1
= tem
;
83 /* If this simplifies, do it. */
84 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
88 /* Handle addition and subtraction specially. Otherwise, just form
91 if (code
== PLUS
|| code
== MINUS
)
93 tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 1);
98 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
101 /* If X is a MEM referencing the constant pool, return the real value.
102 Otherwise return X. */
104 avoid_constant_pool_reference (rtx x
)
107 enum machine_mode cmode
;
109 switch (GET_CODE (x
))
115 /* Handle float extensions of constant pool references. */
117 c
= avoid_constant_pool_reference (tmp
);
118 if (c
!= tmp
&& GET_CODE (c
) == CONST_DOUBLE
)
122 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
123 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
133 /* Call target hook to avoid the effects of -fpic etc.... */
134 addr
= (*targetm
.delegitimize_address
) (addr
);
136 if (GET_CODE (addr
) == LO_SUM
)
137 addr
= XEXP (addr
, 1);
139 if (GET_CODE (addr
) != SYMBOL_REF
140 || ! CONSTANT_POOL_ADDRESS_P (addr
))
143 c
= get_pool_constant (addr
);
144 cmode
= get_pool_mode (addr
);
146 /* If we're accessing the constant in a different mode than it was
147 originally stored, attempt to fix that up via subreg simplifications.
148 If that fails we have no choice but to return the original memory. */
149 if (cmode
!= GET_MODE (x
))
151 c
= simplify_subreg (GET_MODE (x
), c
, cmode
, 0);
158 /* Make a unary operation by first seeing if it folds and otherwise making
159 the specified operation. */
162 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
163 enum machine_mode op_mode
)
167 /* If this simplifies, use it. */
168 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
171 return gen_rtx_fmt_e (code
, mode
, op
);
174 /* Likewise for ternary operations. */
177 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
178 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
182 /* If this simplifies, use it. */
183 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
187 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
190 /* Likewise, for relational operations.
191 CMP_MODE specifies mode comparison is done in.
195 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
196 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
200 if (cmp_mode
== VOIDmode
)
201 cmp_mode
= GET_MODE (op0
);
202 if (cmp_mode
== VOIDmode
)
203 cmp_mode
= GET_MODE (op1
);
205 if (cmp_mode
!= VOIDmode
)
207 tem
= simplify_relational_operation (code
, cmp_mode
, op0
, op1
);
211 #ifdef FLOAT_STORE_FLAG_VALUE
212 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
215 if (tem
== const0_rtx
)
216 return CONST0_RTX (mode
);
217 if (tem
!= const_true_rtx
)
219 val
= FLOAT_STORE_FLAG_VALUE (mode
);
220 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
227 /* For the following tests, ensure const0_rtx is op1. */
228 if (swap_commutative_operands_p (op0
, op1
)
229 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
230 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
232 /* If op0 is a compare, extract the comparison arguments from it. */
233 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
234 return simplify_gen_relational (code
, mode
, VOIDmode
,
235 XEXP (op0
, 0), XEXP (op0
, 1));
237 /* If op0 is a comparison, extract the comparison arguments form it. */
238 if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && op1
== const0_rtx
)
242 if (GET_MODE (op0
) == mode
)
244 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
245 XEXP (op0
, 0), XEXP (op0
, 1));
249 enum rtx_code
new = reversed_comparison_code (op0
, NULL_RTX
);
251 return simplify_gen_relational (new, mode
, VOIDmode
,
252 XEXP (op0
, 0), XEXP (op0
, 1));
256 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
259 /* Replace all occurrences of OLD in X with NEW and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
263 simplify_replace_rtx (rtx x
, rtx old
, rtx
new)
265 enum rtx_code code
= GET_CODE (x
);
266 enum machine_mode mode
= GET_MODE (x
);
268 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
269 to build a new expression substituting recursively. If we can't do
270 anything, return our input. */
275 switch (GET_RTX_CLASS (code
))
279 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
280 rtx op
= (XEXP (x
, 0) == old
281 ? new : simplify_replace_rtx (XEXP (x
, 0), old
, new));
283 return simplify_gen_unary (code
, mode
, op
, op_mode
);
289 simplify_gen_binary (code
, mode
,
290 simplify_replace_rtx (XEXP (x
, 0), old
, new),
291 simplify_replace_rtx (XEXP (x
, 1), old
, new));
294 enum machine_mode op_mode
= (GET_MODE (XEXP (x
, 0)) != VOIDmode
295 ? GET_MODE (XEXP (x
, 0))
296 : GET_MODE (XEXP (x
, 1)));
297 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
298 rtx op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
299 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
305 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
306 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
309 simplify_gen_ternary (code
, mode
,
314 simplify_replace_rtx (XEXP (x
, 1), old
, new),
315 simplify_replace_rtx (XEXP (x
, 2), old
, new));
319 /* The only case we try to handle is a SUBREG. */
323 exp
= simplify_gen_subreg (GET_MODE (x
),
324 simplify_replace_rtx (SUBREG_REG (x
),
326 GET_MODE (SUBREG_REG (x
)),
335 return replace_equiv_address_nv (x
,
336 simplify_replace_rtx (XEXP (x
, 0),
338 else if (code
== LO_SUM
)
340 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
341 rtx op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
343 /* (lo_sum (high x) x) -> x */
344 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
347 return gen_rtx_LO_SUM (mode
, op0
, op1
);
349 else if (code
== REG
)
351 if (REG_P (old
) && REGNO (x
) == REGNO (old
))
363 /* Try to simplify a unary operation CODE whose output mode is to be
364 MODE with input operand OP whose mode was originally OP_MODE.
365 Return zero if no simplification can be made. */
367 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
368 rtx op
, enum machine_mode op_mode
)
370 unsigned int width
= GET_MODE_BITSIZE (mode
);
371 rtx trueop
= avoid_constant_pool_reference (op
);
373 if (code
== VEC_DUPLICATE
)
375 if (!VECTOR_MODE_P (mode
))
377 if (GET_MODE (trueop
) != VOIDmode
378 && !VECTOR_MODE_P (GET_MODE (trueop
))
379 && GET_MODE_INNER (mode
) != GET_MODE (trueop
))
381 if (GET_MODE (trueop
) != VOIDmode
382 && VECTOR_MODE_P (GET_MODE (trueop
))
383 && GET_MODE_INNER (mode
) != GET_MODE_INNER (GET_MODE (trueop
)))
385 if (GET_CODE (trueop
) == CONST_INT
|| GET_CODE (trueop
) == CONST_DOUBLE
386 || GET_CODE (trueop
) == CONST_VECTOR
)
388 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
389 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
390 rtvec v
= rtvec_alloc (n_elts
);
393 if (GET_CODE (trueop
) != CONST_VECTOR
)
394 for (i
= 0; i
< n_elts
; i
++)
395 RTVEC_ELT (v
, i
) = trueop
;
398 enum machine_mode inmode
= GET_MODE (trueop
);
399 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
400 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
402 if (in_n_elts
>= n_elts
|| n_elts
% in_n_elts
)
404 for (i
= 0; i
< n_elts
; i
++)
405 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop
, i
% in_n_elts
);
407 return gen_rtx_CONST_VECTOR (mode
, v
);
411 if (VECTOR_MODE_P (mode
) && GET_CODE (trueop
) == CONST_VECTOR
)
413 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
414 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
415 enum machine_mode opmode
= GET_MODE (trueop
);
416 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
417 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
418 rtvec v
= rtvec_alloc (n_elts
);
421 if (op_n_elts
!= n_elts
)
424 for (i
= 0; i
< n_elts
; i
++)
426 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
427 CONST_VECTOR_ELT (trueop
, i
),
428 GET_MODE_INNER (opmode
));
431 RTVEC_ELT (v
, i
) = x
;
433 return gen_rtx_CONST_VECTOR (mode
, v
);
436 /* The order of these tests is critical so that, for example, we don't
437 check the wrong mode (input vs. output) for a conversion operation,
438 such as FIX. At some point, this should be simplified. */
440 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
441 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
443 HOST_WIDE_INT hv
, lv
;
446 if (GET_CODE (trueop
) == CONST_INT
)
447 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
449 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
451 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
452 d
= real_value_truncate (mode
, d
);
453 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
455 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
456 && (GET_CODE (trueop
) == CONST_DOUBLE
457 || GET_CODE (trueop
) == CONST_INT
))
459 HOST_WIDE_INT hv
, lv
;
462 if (GET_CODE (trueop
) == CONST_INT
)
463 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
465 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
467 if (op_mode
== VOIDmode
)
469 /* We don't know how to interpret negative-looking numbers in
470 this case, so don't try to fold those. */
474 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
477 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
479 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
480 d
= real_value_truncate (mode
, d
);
481 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
484 if (GET_CODE (trueop
) == CONST_INT
485 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
487 HOST_WIDE_INT arg0
= INTVAL (trueop
);
501 val
= (arg0
>= 0 ? arg0
: - arg0
);
505 /* Don't use ffs here. Instead, get low order bit and then its
506 number. If arg0 is zero, this will return 0, as desired. */
507 arg0
&= GET_MODE_MASK (mode
);
508 val
= exact_log2 (arg0
& (- arg0
)) + 1;
512 arg0
&= GET_MODE_MASK (mode
);
513 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
516 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
520 arg0
&= GET_MODE_MASK (mode
);
523 /* Even if the value at zero is undefined, we have to come
524 up with some replacement. Seems good enough. */
525 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
526 val
= GET_MODE_BITSIZE (mode
);
529 val
= exact_log2 (arg0
& -arg0
);
533 arg0
&= GET_MODE_MASK (mode
);
536 val
++, arg0
&= arg0
- 1;
540 arg0
&= GET_MODE_MASK (mode
);
543 val
++, arg0
&= arg0
- 1;
552 /* When zero-extending a CONST_INT, we need to know its
554 if (op_mode
== VOIDmode
)
556 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
558 /* If we were really extending the mode,
559 we would have to distinguish between zero-extension
560 and sign-extension. */
561 if (width
!= GET_MODE_BITSIZE (op_mode
))
565 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
566 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
572 if (op_mode
== VOIDmode
)
574 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
576 /* If we were really extending the mode,
577 we would have to distinguish between zero-extension
578 and sign-extension. */
579 if (width
!= GET_MODE_BITSIZE (op_mode
))
583 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
586 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
588 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
589 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
606 val
= trunc_int_for_mode (val
, mode
);
608 return GEN_INT (val
);
611 /* We can do some operations on integer CONST_DOUBLEs. Also allow
612 for a DImode operation on a CONST_INT. */
613 else if (GET_MODE (trueop
) == VOIDmode
614 && width
<= HOST_BITS_PER_WIDE_INT
* 2
615 && (GET_CODE (trueop
) == CONST_DOUBLE
616 || GET_CODE (trueop
) == CONST_INT
))
618 unsigned HOST_WIDE_INT l1
, lv
;
619 HOST_WIDE_INT h1
, hv
;
621 if (GET_CODE (trueop
) == CONST_DOUBLE
)
622 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
624 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
634 neg_double (l1
, h1
, &lv
, &hv
);
639 neg_double (l1
, h1
, &lv
, &hv
);
651 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
654 lv
= exact_log2 (l1
& -l1
) + 1;
660 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
661 - HOST_BITS_PER_WIDE_INT
;
663 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
664 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
665 lv
= GET_MODE_BITSIZE (mode
);
671 lv
= exact_log2 (l1
& -l1
);
673 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
674 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
675 lv
= GET_MODE_BITSIZE (mode
);
698 /* This is just a change-of-mode, so do nothing. */
703 if (op_mode
== VOIDmode
)
706 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
710 lv
= l1
& GET_MODE_MASK (op_mode
);
714 if (op_mode
== VOIDmode
715 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
719 lv
= l1
& GET_MODE_MASK (op_mode
);
720 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
721 && (lv
& ((HOST_WIDE_INT
) 1
722 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
723 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
725 hv
= HWI_SIGN_EXTEND (lv
);
736 return immed_double_const (lv
, hv
, mode
);
739 else if (GET_CODE (trueop
) == CONST_DOUBLE
740 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
742 REAL_VALUE_TYPE d
, t
;
743 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
748 if (HONOR_SNANS (mode
) && real_isnan (&d
))
750 real_sqrt (&t
, mode
, &d
);
754 d
= REAL_VALUE_ABS (d
);
757 d
= REAL_VALUE_NEGATE (d
);
760 d
= real_value_truncate (mode
, d
);
763 /* All this does is change the mode. */
766 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
772 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
775 else if (GET_CODE (trueop
) == CONST_DOUBLE
776 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
777 && GET_MODE_CLASS (mode
) == MODE_INT
778 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
780 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
781 operators are intentionally left unspecified (to ease implementation
782 by target backends), for consistency, this routine implements the
783 same semantics for constant folding as used by the middle-end. */
785 HOST_WIDE_INT xh
, xl
, th
, tl
;
786 REAL_VALUE_TYPE x
, t
;
787 REAL_VALUE_FROM_CONST_DOUBLE (x
, trueop
);
791 if (REAL_VALUE_ISNAN (x
))
794 /* Test against the signed upper bound. */
795 if (width
> HOST_BITS_PER_WIDE_INT
)
797 th
= ((unsigned HOST_WIDE_INT
) 1
798 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
804 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
806 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
807 if (REAL_VALUES_LESS (t
, x
))
814 /* Test against the signed lower bound. */
815 if (width
> HOST_BITS_PER_WIDE_INT
)
817 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
823 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
825 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
826 if (REAL_VALUES_LESS (x
, t
))
832 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
836 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
839 /* Test against the unsigned upper bound. */
840 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
845 else if (width
>= HOST_BITS_PER_WIDE_INT
)
847 th
= ((unsigned HOST_WIDE_INT
) 1
848 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
854 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
856 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
857 if (REAL_VALUES_LESS (t
, x
))
864 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
870 return immed_double_const (xl
, xh
, mode
);
873 /* This was formerly used only for non-IEEE float.
874 eggert@twinsun.com says it is safe for IEEE also. */
877 enum rtx_code reversed
;
880 /* There are some simplifications we can do even if the operands
885 /* (not (not X)) == X. */
886 if (GET_CODE (op
) == NOT
)
889 /* (not (eq X Y)) == (ne X Y), etc. */
890 if (GET_RTX_CLASS (GET_CODE (op
)) == '<'
891 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
892 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
894 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
895 XEXP (op
, 0), XEXP (op
, 1));
897 /* (not (plus X -1)) can become (neg X). */
898 if (GET_CODE (op
) == PLUS
899 && XEXP (op
, 1) == constm1_rtx
)
900 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
902 /* Similarly, (not (neg X)) is (plus X -1). */
903 if (GET_CODE (op
) == NEG
)
904 return plus_constant (XEXP (op
, 0), -1);
906 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
907 if (GET_CODE (op
) == XOR
908 && GET_CODE (XEXP (op
, 1)) == CONST_INT
909 && (temp
= simplify_unary_operation (NOT
, mode
,
912 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
915 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
916 operands other than 1, but that is not valid. We could do a
917 similar simplification for (not (lshiftrt C X)) where C is
918 just the sign bit, but this doesn't seem common enough to
920 if (GET_CODE (op
) == ASHIFT
921 && XEXP (op
, 0) == const1_rtx
)
923 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
924 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
927 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
928 by reversing the comparison code if valid. */
929 if (STORE_FLAG_VALUE
== -1
930 && GET_RTX_CLASS (GET_CODE (op
)) == '<'
931 && (reversed
= reversed_comparison_code (op
, NULL_RTX
))
933 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
934 XEXP (op
, 0), XEXP (op
, 1));
936 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
937 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
938 so we can perform the above simplification. */
940 if (STORE_FLAG_VALUE
== -1
941 && GET_CODE (op
) == ASHIFTRT
942 && GET_CODE (XEXP (op
, 1)) == CONST_INT
943 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
944 return simplify_gen_relational (GE
, mode
, VOIDmode
,
945 XEXP (op
, 0), const0_rtx
);
950 /* (neg (neg X)) == X. */
951 if (GET_CODE (op
) == NEG
)
954 /* (neg (plus X 1)) can become (not X). */
955 if (GET_CODE (op
) == PLUS
956 && XEXP (op
, 1) == const1_rtx
)
957 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
959 /* Similarly, (neg (not X)) is (plus X 1). */
960 if (GET_CODE (op
) == NOT
)
961 return plus_constant (XEXP (op
, 0), 1);
963 /* (neg (minus X Y)) can become (minus Y X). This transformation
964 isn't safe for modes with signed zeros, since if X and Y are
965 both +0, (minus Y X) is the same as (minus X Y). If the
966 rounding mode is towards +infinity (or -infinity) then the two
967 expressions will be rounded differently. */
968 if (GET_CODE (op
) == MINUS
969 && !HONOR_SIGNED_ZEROS (mode
)
970 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
971 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1),
974 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
975 if (GET_CODE (op
) == PLUS
976 && !HONOR_SIGNED_ZEROS (mode
)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
979 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
980 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
983 /* (neg (mult A B)) becomes (mult (neg A) B).
984 This works even for floating-point values. */
985 if (GET_CODE (op
) == MULT
986 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
988 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
989 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
992 /* NEG commutes with ASHIFT since it is multiplication. Only do
993 this if we can then eliminate the NEG (e.g., if the operand
995 if (GET_CODE (op
) == ASHIFT
)
997 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0),
1000 return simplify_gen_binary (ASHIFT
, mode
, temp
,
1007 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1008 becomes just the MINUS if its mode is MODE. This allows
1009 folding switch statements on machines using casesi (such as
1011 if (GET_CODE (op
) == TRUNCATE
1012 && GET_MODE (XEXP (op
, 0)) == mode
1013 && GET_CODE (XEXP (op
, 0)) == MINUS
1014 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1015 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1016 return XEXP (op
, 0);
1018 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1019 if (! POINTERS_EXTEND_UNSIGNED
1020 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1022 || (GET_CODE (op
) == SUBREG
1023 && GET_CODE (SUBREG_REG (op
)) == REG
1024 && REG_POINTER (SUBREG_REG (op
))
1025 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1026 return convert_memory_address (Pmode
, op
);
1030 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1032 if (POINTERS_EXTEND_UNSIGNED
> 0
1033 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1035 || (GET_CODE (op
) == SUBREG
1036 && GET_CODE (SUBREG_REG (op
)) == REG
1037 && REG_POINTER (SUBREG_REG (op
))
1038 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1039 return convert_memory_address (Pmode
, op
);
1051 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1052 is a suitable integer or floating point immediate constant. */
1054 associative_constant_p (rtx op
)
1056 if (GET_CODE (op
) == CONST_INT
1057 || GET_CODE (op
) == CONST_DOUBLE
)
1059 op
= avoid_constant_pool_reference (op
);
1060 return GET_CODE (op
) == CONST_INT
1061 || GET_CODE (op
) == CONST_DOUBLE
;
1064 /* Subroutine of simplify_binary_operation to simplify an associative
1065 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1066 Return 0 if no simplification is possible. */
1068 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1073 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1074 if (GET_CODE (op0
) == code
1075 && associative_constant_p (op1
)
1076 && associative_constant_p (XEXP (op0
, 1)))
1078 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1081 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1084 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1085 if (GET_CODE (op0
) == code
1086 && GET_CODE (op1
) == code
1087 && associative_constant_p (XEXP (op0
, 1))
1088 && associative_constant_p (XEXP (op1
, 1)))
1090 rtx c
= simplify_binary_operation (code
, mode
,
1091 XEXP (op0
, 1), XEXP (op1
, 1));
1094 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1095 return simplify_gen_binary (code
, mode
, tem
, c
);
1098 /* Canonicalize (x op c) op y as (x op y) op c. */
1099 if (GET_CODE (op0
) == code
1100 && associative_constant_p (XEXP (op0
, 1)))
1102 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1103 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1106 /* Canonicalize x op (y op c) as (x op y) op c. */
1107 if (GET_CODE (op1
) == code
1108 && associative_constant_p (XEXP (op1
, 1)))
1110 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1111 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1117 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1118 and OP1. Return 0 if no simplification is possible.
1120 Don't use this for relational operations such as EQ or LT.
1121 Use simplify_relational_operation instead. */
1123 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1126 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
1128 unsigned int width
= GET_MODE_BITSIZE (mode
);
1130 rtx trueop0
= avoid_constant_pool_reference (op0
);
1131 rtx trueop1
= avoid_constant_pool_reference (op1
);
1133 /* Relational operations don't work here. We must know the mode
1134 of the operands in order to do the comparison correctly.
1135 Assuming a full word can give incorrect results.
1136 Consider comparing 128 with -128 in QImode. */
1138 if (GET_RTX_CLASS (code
) == '<')
1141 /* Make sure the constant is second. */
1142 if (GET_RTX_CLASS (code
) == 'c'
1143 && swap_commutative_operands_p (trueop0
, trueop1
))
1145 tem
= op0
, op0
= op1
, op1
= tem
;
1146 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
1149 if (VECTOR_MODE_P (mode
)
1150 && GET_CODE (trueop0
) == CONST_VECTOR
1151 && GET_CODE (trueop1
) == CONST_VECTOR
)
1153 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1154 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1155 enum machine_mode op0mode
= GET_MODE (trueop0
);
1156 int op0_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op0mode
));
1157 unsigned op0_n_elts
= (GET_MODE_SIZE (op0mode
) / op0_elt_size
);
1158 enum machine_mode op1mode
= GET_MODE (trueop1
);
1159 int op1_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op1mode
));
1160 unsigned op1_n_elts
= (GET_MODE_SIZE (op1mode
) / op1_elt_size
);
1161 rtvec v
= rtvec_alloc (n_elts
);
1164 if (op0_n_elts
!= n_elts
|| op1_n_elts
!= n_elts
)
1167 for (i
= 0; i
< n_elts
; i
++)
1169 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
1170 CONST_VECTOR_ELT (trueop0
, i
),
1171 CONST_VECTOR_ELT (trueop1
, i
));
1174 RTVEC_ELT (v
, i
) = x
;
1177 return gen_rtx_CONST_VECTOR (mode
, v
);
1180 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1181 && GET_CODE (trueop0
) == CONST_DOUBLE
1182 && GET_CODE (trueop1
) == CONST_DOUBLE
1183 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
1185 REAL_VALUE_TYPE f0
, f1
, value
;
1187 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
1188 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
1189 f0
= real_value_truncate (mode
, f0
);
1190 f1
= real_value_truncate (mode
, f1
);
1192 if (HONOR_SNANS (mode
)
1193 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
1197 && REAL_VALUES_EQUAL (f1
, dconst0
)
1198 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
1201 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
1203 value
= real_value_truncate (mode
, value
);
1204 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
1207 /* We can fold some multi-word operations. */
1208 if (GET_MODE_CLASS (mode
) == MODE_INT
1209 && width
== HOST_BITS_PER_WIDE_INT
* 2
1210 && (GET_CODE (trueop0
) == CONST_DOUBLE
1211 || GET_CODE (trueop0
) == CONST_INT
)
1212 && (GET_CODE (trueop1
) == CONST_DOUBLE
1213 || GET_CODE (trueop1
) == CONST_INT
))
1215 unsigned HOST_WIDE_INT l1
, l2
, lv
;
1216 HOST_WIDE_INT h1
, h2
, hv
;
1218 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
1219 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
1221 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
1223 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
1224 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
1226 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
1231 /* A - B == A + (-B). */
1232 neg_double (l2
, h2
, &lv
, &hv
);
1235 /* Fall through.... */
1238 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1242 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1245 case DIV
: case MOD
: case UDIV
: case UMOD
:
1246 /* We'd need to include tree.h to do this and it doesn't seem worth
1251 lv
= l1
& l2
, hv
= h1
& h2
;
1255 lv
= l1
| l2
, hv
= h1
| h2
;
1259 lv
= l1
^ l2
, hv
= h1
^ h2
;
1265 && ((unsigned HOST_WIDE_INT
) l1
1266 < (unsigned HOST_WIDE_INT
) l2
)))
1275 && ((unsigned HOST_WIDE_INT
) l1
1276 > (unsigned HOST_WIDE_INT
) l2
)))
1283 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
1285 && ((unsigned HOST_WIDE_INT
) l1
1286 < (unsigned HOST_WIDE_INT
) l2
)))
1293 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
1295 && ((unsigned HOST_WIDE_INT
) l1
1296 > (unsigned HOST_WIDE_INT
) l2
)))
1302 case LSHIFTRT
: case ASHIFTRT
:
1304 case ROTATE
: case ROTATERT
:
1305 #ifdef SHIFT_COUNT_TRUNCATED
1306 if (SHIFT_COUNT_TRUNCATED
)
1307 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
1310 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
1313 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
1314 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
1316 else if (code
== ASHIFT
)
1317 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
1318 else if (code
== ROTATE
)
1319 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1320 else /* code == ROTATERT */
1321 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1328 return immed_double_const (lv
, hv
, mode
);
1331 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
1332 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
1334 /* Even if we can't compute a constant result,
1335 there are some cases worth simplifying. */
1340 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1341 when x is NaN, infinite, or finite and nonzero. They aren't
1342 when x is -0 and the rounding mode is not towards -infinity,
1343 since (-0) + 0 is then 0. */
1344 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1347 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1348 transformations are safe even for IEEE. */
1349 if (GET_CODE (op0
) == NEG
)
1350 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1351 else if (GET_CODE (op1
) == NEG
)
1352 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1354 /* (~a) + 1 -> -a */
1355 if (INTEGRAL_MODE_P (mode
)
1356 && GET_CODE (op0
) == NOT
1357 && trueop1
== const1_rtx
)
1358 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1360 /* Handle both-operands-constant cases. We can only add
1361 CONST_INTs to constants since the sum of relocatable symbols
1362 can't be handled by most assemblers. Don't add CONST_INT
1363 to CONST_INT since overflow won't be computed properly if wider
1364 than HOST_BITS_PER_WIDE_INT. */
1366 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1367 && GET_CODE (op1
) == CONST_INT
)
1368 return plus_constant (op0
, INTVAL (op1
));
1369 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1370 && GET_CODE (op0
) == CONST_INT
)
1371 return plus_constant (op1
, INTVAL (op0
));
1373 /* See if this is something like X * C - X or vice versa or
1374 if the multiplication is written as a shift. If so, we can
1375 distribute and make a new multiply, shift, or maybe just
1376 have X (if C is 2 in the example above). But don't make
1377 real multiply if we didn't have one before. */
1379 if (! FLOAT_MODE_P (mode
))
1381 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1382 rtx lhs
= op0
, rhs
= op1
;
1385 if (GET_CODE (lhs
) == NEG
)
1386 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1387 else if (GET_CODE (lhs
) == MULT
1388 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1390 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1393 else if (GET_CODE (lhs
) == ASHIFT
1394 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1395 && INTVAL (XEXP (lhs
, 1)) >= 0
1396 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1398 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1399 lhs
= XEXP (lhs
, 0);
1402 if (GET_CODE (rhs
) == NEG
)
1403 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1404 else if (GET_CODE (rhs
) == MULT
1405 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1407 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1410 else if (GET_CODE (rhs
) == ASHIFT
1411 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1412 && INTVAL (XEXP (rhs
, 1)) >= 0
1413 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1415 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1416 rhs
= XEXP (rhs
, 0);
1419 if (rtx_equal_p (lhs
, rhs
))
1421 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1422 GEN_INT (coeff0
+ coeff1
));
1423 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1427 /* If one of the operands is a PLUS or a MINUS, see if we can
1428 simplify this by the associative law.
1429 Don't use the associative law for floating point.
1430 The inaccuracy makes it nonassociative,
1431 and subtle programs can break if operations are associated. */
1433 if (INTEGRAL_MODE_P (mode
)
1434 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1435 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1436 || (GET_CODE (op0
) == CONST
1437 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1438 || (GET_CODE (op1
) == CONST
1439 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1440 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1443 /* Reassociate floating point addition only when the user
1444 specifies unsafe math optimizations. */
1445 if (FLOAT_MODE_P (mode
)
1446 && flag_unsafe_math_optimizations
)
1448 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1456 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1457 using cc0, in which case we want to leave it as a COMPARE
1458 so we can distinguish it from a register-register-copy.
1460 In IEEE floating point, x-0 is not the same as x. */
1462 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1463 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1464 && trueop1
== CONST0_RTX (mode
))
1468 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1469 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1470 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1471 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1473 rtx xop00
= XEXP (op0
, 0);
1474 rtx xop10
= XEXP (op1
, 0);
1477 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1479 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1480 && GET_MODE (xop00
) == GET_MODE (xop10
)
1481 && REGNO (xop00
) == REGNO (xop10
)
1482 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1483 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1490 /* We can't assume x-x is 0 even with non-IEEE floating point,
1491 but since it is zero except in very strange circumstances, we
1492 will treat it as zero with -funsafe-math-optimizations. */
1493 if (rtx_equal_p (trueop0
, trueop1
)
1494 && ! side_effects_p (op0
)
1495 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1496 return CONST0_RTX (mode
);
1498 /* Change subtraction from zero into negation. (0 - x) is the
1499 same as -x when x is NaN, infinite, or finite and nonzero.
1500 But if the mode has signed zeros, and does not round towards
1501 -infinity, then 0 - 0 is 0, not -0. */
1502 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1503 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1505 /* (-1 - a) is ~a. */
1506 if (trueop0
== constm1_rtx
)
1507 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1509 /* Subtracting 0 has no effect unless the mode has signed zeros
1510 and supports rounding towards -infinity. In such a case,
1512 if (!(HONOR_SIGNED_ZEROS (mode
)
1513 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1514 && trueop1
== CONST0_RTX (mode
))
1517 /* See if this is something like X * C - X or vice versa or
1518 if the multiplication is written as a shift. If so, we can
1519 distribute and make a new multiply, shift, or maybe just
1520 have X (if C is 2 in the example above). But don't make
1521 real multiply if we didn't have one before. */
1523 if (! FLOAT_MODE_P (mode
))
1525 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1526 rtx lhs
= op0
, rhs
= op1
;
1529 if (GET_CODE (lhs
) == NEG
)
1530 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1531 else if (GET_CODE (lhs
) == MULT
1532 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1534 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1537 else if (GET_CODE (lhs
) == ASHIFT
1538 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1539 && INTVAL (XEXP (lhs
, 1)) >= 0
1540 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1542 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1543 lhs
= XEXP (lhs
, 0);
1546 if (GET_CODE (rhs
) == NEG
)
1547 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1548 else if (GET_CODE (rhs
) == MULT
1549 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1551 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1554 else if (GET_CODE (rhs
) == ASHIFT
1555 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1556 && INTVAL (XEXP (rhs
, 1)) >= 0
1557 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1559 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1560 rhs
= XEXP (rhs
, 0);
1563 if (rtx_equal_p (lhs
, rhs
))
1565 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1566 GEN_INT (coeff0
- coeff1
));
1567 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1571 /* (a - (-b)) -> (a + b). True even for IEEE. */
1572 if (GET_CODE (op1
) == NEG
)
1573 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1575 /* If one of the operands is a PLUS or a MINUS, see if we can
1576 simplify this by the associative law.
1577 Don't use the associative law for floating point.
1578 The inaccuracy makes it nonassociative,
1579 and subtle programs can break if operations are associated. */
1581 if (INTEGRAL_MODE_P (mode
)
1582 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1583 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1584 || (GET_CODE (op0
) == CONST
1585 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1586 || (GET_CODE (op1
) == CONST
1587 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1588 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1591 /* Don't let a relocatable value get a negative coeff. */
1592 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1593 return simplify_gen_binary (PLUS
, mode
,
1595 neg_const_int (mode
, op1
));
1597 /* (x - (x & y)) -> (x & ~y) */
1598 if (GET_CODE (op1
) == AND
)
1600 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1602 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1603 GET_MODE (XEXP (op1
, 1)));
1604 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1606 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1608 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1609 GET_MODE (XEXP (op1
, 0)));
1610 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1616 if (trueop1
== constm1_rtx
)
1617 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1619 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1620 x is NaN, since x * 0 is then also NaN. Nor is it valid
1621 when the mode has signed zeros, since multiplying a negative
1622 number by 0 will give -0, not 0. */
1623 if (!HONOR_NANS (mode
)
1624 && !HONOR_SIGNED_ZEROS (mode
)
1625 && trueop1
== CONST0_RTX (mode
)
1626 && ! side_effects_p (op0
))
1629 /* In IEEE floating point, x*1 is not equivalent to x for
1631 if (!HONOR_SNANS (mode
)
1632 && trueop1
== CONST1_RTX (mode
))
1635 /* Convert multiply by constant power of two into shift unless
1636 we are still generating RTL. This test is a kludge. */
1637 if (GET_CODE (trueop1
) == CONST_INT
1638 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1639 /* If the mode is larger than the host word size, and the
1640 uppermost bit is set, then this isn't a power of two due
1641 to implicit sign extension. */
1642 && (width
<= HOST_BITS_PER_WIDE_INT
1643 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1644 && ! rtx_equal_function_value_matters
)
1645 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1647 /* x*2 is x+x and x*(-1) is -x */
1648 if (GET_CODE (trueop1
) == CONST_DOUBLE
1649 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1650 && GET_MODE (op0
) == mode
)
1653 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1655 if (REAL_VALUES_EQUAL (d
, dconst2
))
1656 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1658 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1659 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1662 /* Reassociate multiplication, but for floating point MULTs
1663 only when the user specifies unsafe math optimizations. */
1664 if (! FLOAT_MODE_P (mode
)
1665 || flag_unsafe_math_optimizations
)
1667 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1674 if (trueop1
== const0_rtx
)
1676 if (GET_CODE (trueop1
) == CONST_INT
1677 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1678 == GET_MODE_MASK (mode
)))
1680 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1682 /* A | (~A) -> -1 */
1683 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1684 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1685 && ! side_effects_p (op0
)
1686 && GET_MODE_CLASS (mode
) != MODE_CC
)
1688 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1694 if (trueop1
== const0_rtx
)
1696 if (GET_CODE (trueop1
) == CONST_INT
1697 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1698 == GET_MODE_MASK (mode
)))
1699 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1700 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1701 && GET_MODE_CLASS (mode
) != MODE_CC
)
1703 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1709 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1711 if (GET_CODE (trueop1
) == CONST_INT
1712 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1713 == GET_MODE_MASK (mode
)))
1715 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1716 && GET_MODE_CLASS (mode
) != MODE_CC
)
1719 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1720 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1721 && ! side_effects_p (op0
)
1722 && GET_MODE_CLASS (mode
) != MODE_CC
)
1724 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1730 /* Convert divide by power of two into shift (divide by 1 handled
1732 if (GET_CODE (trueop1
) == CONST_INT
1733 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1734 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (arg1
));
1736 /* Fall through.... */
1739 if (trueop1
== CONST1_RTX (mode
))
1741 /* On some platforms DIV uses narrower mode than its
1743 rtx x
= gen_lowpart_common (mode
, op0
);
1746 else if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1747 return gen_lowpart_SUBREG (mode
, op0
);
1752 /* Maybe change 0 / x to 0. This transformation isn't safe for
1753 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1754 Nor is it safe for modes with signed zeros, since dividing
1755 0 by a negative number gives -0, not 0. */
1756 if (!HONOR_NANS (mode
)
1757 && !HONOR_SIGNED_ZEROS (mode
)
1758 && trueop0
== CONST0_RTX (mode
)
1759 && ! side_effects_p (op1
))
1762 /* Change division by a constant into multiplication. Only do
1763 this with -funsafe-math-optimizations. */
1764 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1765 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1766 && trueop1
!= CONST0_RTX (mode
)
1767 && flag_unsafe_math_optimizations
)
1770 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1772 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1774 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1775 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1776 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
1782 /* Handle modulus by power of two (mod with 1 handled below). */
1783 if (GET_CODE (trueop1
) == CONST_INT
1784 && exact_log2 (INTVAL (trueop1
)) > 0)
1785 return simplify_gen_binary (AND
, mode
, op0
,
1786 GEN_INT (INTVAL (op1
) - 1));
1788 /* Fall through.... */
1791 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1792 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1799 /* Rotating ~0 always results in ~0. */
1800 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1801 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1802 && ! side_effects_p (op1
))
1805 /* Fall through.... */
1809 if (trueop1
== const0_rtx
)
1811 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1816 if (width
<= HOST_BITS_PER_WIDE_INT
1817 && GET_CODE (trueop1
) == CONST_INT
1818 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1819 && ! side_effects_p (op0
))
1821 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1823 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1829 if (width
<= HOST_BITS_PER_WIDE_INT
1830 && GET_CODE (trueop1
) == CONST_INT
1831 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1832 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1833 && ! side_effects_p (op0
))
1835 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1837 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1843 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1845 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1847 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1853 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1855 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1857 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1866 /* ??? There are simplifications that can be done. */
1870 if (!VECTOR_MODE_P (mode
))
1872 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1874 != GET_MODE_INNER (GET_MODE (trueop0
)))
1875 || GET_CODE (trueop1
) != PARALLEL
1876 || XVECLEN (trueop1
, 0) != 1
1877 || GET_CODE (XVECEXP (trueop1
, 0, 0)) != CONST_INT
)
1880 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1881 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP (trueop1
, 0, 0)));
1885 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1886 || (GET_MODE_INNER (mode
)
1887 != GET_MODE_INNER (GET_MODE (trueop0
)))
1888 || GET_CODE (trueop1
) != PARALLEL
)
1891 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1893 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1894 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1895 rtvec v
= rtvec_alloc (n_elts
);
1898 if (XVECLEN (trueop1
, 0) != (int) n_elts
)
1900 for (i
= 0; i
< n_elts
; i
++)
1902 rtx x
= XVECEXP (trueop1
, 0, i
);
1904 if (GET_CODE (x
) != CONST_INT
)
1906 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, INTVAL (x
));
1909 return gen_rtx_CONST_VECTOR (mode
, v
);
1915 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
1916 ? GET_MODE (trueop0
)
1917 : GET_MODE_INNER (mode
));
1918 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
1919 ? GET_MODE (trueop1
)
1920 : GET_MODE_INNER (mode
));
1922 if (!VECTOR_MODE_P (mode
)
1923 || (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
1924 != GET_MODE_SIZE (mode
)))
1927 if ((VECTOR_MODE_P (op0_mode
)
1928 && (GET_MODE_INNER (mode
)
1929 != GET_MODE_INNER (op0_mode
)))
1930 || (!VECTOR_MODE_P (op0_mode
)
1931 && GET_MODE_INNER (mode
) != op0_mode
))
1934 if ((VECTOR_MODE_P (op1_mode
)
1935 && (GET_MODE_INNER (mode
)
1936 != GET_MODE_INNER (op1_mode
)))
1937 || (!VECTOR_MODE_P (op1_mode
)
1938 && GET_MODE_INNER (mode
) != op1_mode
))
1941 if ((GET_CODE (trueop0
) == CONST_VECTOR
1942 || GET_CODE (trueop0
) == CONST_INT
1943 || GET_CODE (trueop0
) == CONST_DOUBLE
)
1944 && (GET_CODE (trueop1
) == CONST_VECTOR
1945 || GET_CODE (trueop1
) == CONST_INT
1946 || GET_CODE (trueop1
) == CONST_DOUBLE
))
1948 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1949 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1950 rtvec v
= rtvec_alloc (n_elts
);
1952 unsigned in_n_elts
= 1;
1954 if (VECTOR_MODE_P (op0_mode
))
1955 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
1956 for (i
= 0; i
< n_elts
; i
++)
1960 if (!VECTOR_MODE_P (op0_mode
))
1961 RTVEC_ELT (v
, i
) = trueop0
;
1963 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
1967 if (!VECTOR_MODE_P (op1_mode
))
1968 RTVEC_ELT (v
, i
) = trueop1
;
1970 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
1975 return gen_rtx_CONST_VECTOR (mode
, v
);
1987 /* Get the integer argument values in two forms:
1988 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1990 arg0
= INTVAL (trueop0
);
1991 arg1
= INTVAL (trueop1
);
1993 if (width
< HOST_BITS_PER_WIDE_INT
)
1995 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1996 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1999 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2000 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2003 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2004 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2012 /* Compute the value of the arithmetic. */
2017 val
= arg0s
+ arg1s
;
2021 val
= arg0s
- arg1s
;
2025 val
= arg0s
* arg1s
;
2030 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2033 val
= arg0s
/ arg1s
;
2038 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2041 val
= arg0s
% arg1s
;
2046 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2049 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
2054 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2057 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
2073 /* If shift count is undefined, don't fold it; let the machine do
2074 what it wants. But truncate it if the machine will do that. */
2078 #ifdef SHIFT_COUNT_TRUNCATED
2079 if (SHIFT_COUNT_TRUNCATED
)
2083 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
2090 #ifdef SHIFT_COUNT_TRUNCATED
2091 if (SHIFT_COUNT_TRUNCATED
)
2095 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
2102 #ifdef SHIFT_COUNT_TRUNCATED
2103 if (SHIFT_COUNT_TRUNCATED
)
2107 val
= arg0s
>> arg1
;
2109 /* Bootstrap compiler may not have sign extended the right shift.
2110 Manually extend the sign to insure bootstrap cc matches gcc. */
2111 if (arg0s
< 0 && arg1
> 0)
2112 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
2121 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
2122 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
2130 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
2131 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
2135 /* Do nothing here. */
2139 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
2143 val
= ((unsigned HOST_WIDE_INT
) arg0
2144 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2148 val
= arg0s
> arg1s
? arg0s
: arg1s
;
2152 val
= ((unsigned HOST_WIDE_INT
) arg0
2153 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2160 /* ??? There are simplifications that can be done. */
2167 val
= trunc_int_for_mode (val
, mode
);
2169 return GEN_INT (val
);
2172 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2175 Rather than test for specific case, we do this by a brute-force method
2176 and do all possible simplifications until no more changes occur. Then
2177 we rebuild the operation.
2179 If FORCE is true, then always generate the rtx. This is used to
2180 canonicalize stuff emitted from simplify_gen_binary. Note that this
2181 can still fail if the rtx is too complex. It won't fail just because
2182 the result is not 'simpler' than the input, however. */
2184 struct simplify_plus_minus_op_data
2191 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
2193 const struct simplify_plus_minus_op_data
*d1
= p1
;
2194 const struct simplify_plus_minus_op_data
*d2
= p2
;
2196 return (commutative_operand_precedence (d2
->op
)
2197 - commutative_operand_precedence (d1
->op
));
2201 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
2204 struct simplify_plus_minus_op_data ops
[8];
2206 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
;
2207 int first
, negate
, changed
;
2210 memset (ops
, 0, sizeof ops
);
2212 /* Set up the two operands and then expand them until nothing has been
2213 changed. If we run out of room in our array, give up; this should
2214 almost never happen. */
2219 ops
[1].neg
= (code
== MINUS
);
2225 for (i
= 0; i
< n_ops
; i
++)
2227 rtx this_op
= ops
[i
].op
;
2228 int this_neg
= ops
[i
].neg
;
2229 enum rtx_code this_code
= GET_CODE (this_op
);
2238 ops
[n_ops
].op
= XEXP (this_op
, 1);
2239 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
2242 ops
[i
].op
= XEXP (this_op
, 0);
2248 ops
[i
].op
= XEXP (this_op
, 0);
2249 ops
[i
].neg
= ! this_neg
;
2255 && GET_CODE (XEXP (this_op
, 0)) == PLUS
2256 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
2257 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
2259 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
2260 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
2261 ops
[n_ops
].neg
= this_neg
;
2269 /* ~a -> (-a - 1) */
2272 ops
[n_ops
].op
= constm1_rtx
;
2273 ops
[n_ops
++].neg
= this_neg
;
2274 ops
[i
].op
= XEXP (this_op
, 0);
2275 ops
[i
].neg
= !this_neg
;
2283 ops
[i
].op
= neg_const_int (mode
, this_op
);
2296 /* If we only have two operands, we can't do anything. */
2297 if (n_ops
<= 2 && !force
)
2300 /* Count the number of CONSTs we didn't split above. */
2301 for (i
= 0; i
< n_ops
; i
++)
2302 if (GET_CODE (ops
[i
].op
) == CONST
)
2305 /* Now simplify each pair of operands until nothing changes. The first
2306 time through just simplify constants against each other. */
2313 for (i
= 0; i
< n_ops
- 1; i
++)
2314 for (j
= i
+ 1; j
< n_ops
; j
++)
2316 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
2317 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
2319 if (lhs
!= 0 && rhs
!= 0
2320 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
2322 enum rtx_code ncode
= PLUS
;
2328 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2330 else if (swap_commutative_operands_p (lhs
, rhs
))
2331 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2333 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
2335 /* Reject "simplifications" that just wrap the two
2336 arguments in a CONST. Failure to do so can result
2337 in infinite recursion with simplify_binary_operation
2338 when it calls us to simplify CONST operations. */
2340 && ! (GET_CODE (tem
) == CONST
2341 && GET_CODE (XEXP (tem
, 0)) == ncode
2342 && XEXP (XEXP (tem
, 0), 0) == lhs
2343 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2344 /* Don't allow -x + -1 -> ~x simplifications in the
2345 first pass. This allows us the chance to combine
2346 the -1 with other constants. */
2348 && GET_CODE (tem
) == NOT
2349 && XEXP (tem
, 0) == rhs
))
2352 if (GET_CODE (tem
) == NEG
)
2353 tem
= XEXP (tem
, 0), lneg
= !lneg
;
2354 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
2355 tem
= neg_const_int (mode
, tem
), lneg
= 0;
2359 ops
[j
].op
= NULL_RTX
;
2369 /* Pack all the operands to the lower-numbered entries. */
2370 for (i
= 0, j
= 0; j
< n_ops
; j
++)
2375 /* Sort the operations based on swap_commutative_operands_p. */
2376 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
2378 /* We suppressed creation of trivial CONST expressions in the
2379 combination loop to avoid recursion. Create one manually now.
2380 The combination loop should have ensured that there is exactly
2381 one CONST_INT, and the sort will have ensured that it is last
2382 in the array and that any other constant will be next-to-last. */
2385 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
2386 && CONSTANT_P (ops
[n_ops
- 2].op
))
2388 rtx value
= ops
[n_ops
- 1].op
;
2389 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
2390 value
= neg_const_int (mode
, value
);
2391 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
2395 /* Count the number of CONSTs that we generated. */
2397 for (i
= 0; i
< n_ops
; i
++)
2398 if (GET_CODE (ops
[i
].op
) == CONST
)
2401 /* Give up if we didn't reduce the number of operands we had. Make
2402 sure we count a CONST as two operands. If we have the same
2403 number of operands, but have made more CONSTs than before, this
2404 is also an improvement, so accept it. */
2406 && (n_ops
+ n_consts
> input_ops
2407 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
2410 /* Put a non-negated operand first. If there aren't any, make all
2411 operands positive and negate the whole thing later. */
2414 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
2418 for (i
= 0; i
< n_ops
; i
++)
2430 /* Now make the result by performing the requested operations. */
2432 for (i
= 1; i
< n_ops
; i
++)
2433 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
2434 mode
, result
, ops
[i
].op
);
2436 return negate
? gen_rtx_NEG (mode
, result
) : result
;
2439 /* Like simplify_binary_operation except used for relational operators.
2440 MODE is the mode of the operands, not that of the result. If MODE
2441 is VOIDmode, both operands must also be VOIDmode and we compare the
2442 operands in "infinite precision".
2444 If no simplification is possible, this function returns zero. Otherwise,
2445 it returns either const_true_rtx or const0_rtx. */
2448 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
2451 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
2456 if (mode
== VOIDmode
2457 && (GET_MODE (op0
) != VOIDmode
2458 || GET_MODE (op1
) != VOIDmode
))
2461 /* If op0 is a compare, extract the comparison arguments from it. */
2462 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
2463 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
2465 trueop0
= avoid_constant_pool_reference (op0
);
2466 trueop1
= avoid_constant_pool_reference (op1
);
2468 /* We can't simplify MODE_CC values since we don't know what the
2469 actual comparison is. */
2470 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
2473 /* Make sure the constant is second. */
2474 if (swap_commutative_operands_p (trueop0
, trueop1
))
2476 tem
= op0
, op0
= op1
, op1
= tem
;
2477 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
2478 code
= swap_condition (code
);
2481 /* For integer comparisons of A and B maybe we can simplify A - B and can
2482 then simplify a comparison of that with zero. If A and B are both either
2483 a register or a CONST_INT, this can't help; testing for these cases will
2484 prevent infinite recursion here and speed things up.
2486 If CODE is an unsigned comparison, then we can never do this optimization,
2487 because it gives an incorrect result if the subtraction wraps around zero.
2488 ANSI C defines unsigned operations such that they never overflow, and
2489 thus such cases can not be ignored. */
2491 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
2492 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (trueop0
) == CONST_INT
)
2493 && (GET_CODE (op1
) == REG
|| GET_CODE (trueop1
) == CONST_INT
))
2494 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
2495 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
2496 return simplify_relational_operation (signed_condition (code
),
2497 mode
, tem
, const0_rtx
);
2499 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
2500 return const_true_rtx
;
2502 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
2505 /* For modes without NaNs, if the two operands are equal, we know the
2506 result except if they have side-effects. */
2507 if (! HONOR_NANS (GET_MODE (trueop0
))
2508 && rtx_equal_p (trueop0
, trueop1
)
2509 && ! side_effects_p (trueop0
))
2510 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
2512 /* If the operands are floating-point constants, see if we can fold
2514 else if (GET_CODE (trueop0
) == CONST_DOUBLE
2515 && GET_CODE (trueop1
) == CONST_DOUBLE
2516 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
2518 REAL_VALUE_TYPE d0
, d1
;
2520 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
2521 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
2523 /* Comparisons are unordered iff at least one of the values is NaN. */
2524 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
2534 return const_true_rtx
;
2547 equal
= REAL_VALUES_EQUAL (d0
, d1
);
2548 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
2549 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
2552 /* Otherwise, see if the operands are both integers. */
2553 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
2554 && (GET_CODE (trueop0
) == CONST_DOUBLE
2555 || GET_CODE (trueop0
) == CONST_INT
)
2556 && (GET_CODE (trueop1
) == CONST_DOUBLE
2557 || GET_CODE (trueop1
) == CONST_INT
))
2559 int width
= GET_MODE_BITSIZE (mode
);
2560 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
2561 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
2563 /* Get the two words comprising each integer constant. */
2564 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
2566 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
2567 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
2571 l0u
= l0s
= INTVAL (trueop0
);
2572 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
2575 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
2577 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
2578 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
2582 l1u
= l1s
= INTVAL (trueop1
);
2583 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
2586 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2587 we have to sign or zero-extend the values. */
2588 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
2590 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2591 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2593 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2594 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2596 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2597 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2599 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
2600 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
2602 equal
= (h0u
== h1u
&& l0u
== l1u
);
2603 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
2604 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
2605 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
2606 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
2609 /* Otherwise, there are some code-specific tests we can make. */
2615 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
2620 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
2621 return const_true_rtx
;
2625 /* Unsigned values are never negative. */
2626 if (trueop1
== const0_rtx
)
2627 return const_true_rtx
;
2631 if (trueop1
== const0_rtx
)
2636 /* Unsigned values are never greater than the largest
2638 if (GET_CODE (trueop1
) == CONST_INT
2639 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2640 && INTEGRAL_MODE_P (mode
))
2641 return const_true_rtx
;
2645 if (GET_CODE (trueop1
) == CONST_INT
2646 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2647 && INTEGRAL_MODE_P (mode
))
2652 /* Optimize abs(x) < 0.0. */
2653 if (trueop1
== CONST0_RTX (mode
) && !HONOR_SNANS (mode
))
2655 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2657 if (GET_CODE (tem
) == ABS
)
2663 /* Optimize abs(x) >= 0.0. */
2664 if (trueop1
== CONST0_RTX (mode
) && !HONOR_NANS (mode
))
2666 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2668 if (GET_CODE (tem
) == ABS
)
2669 return const_true_rtx
;
2674 /* Optimize ! (abs(x) < 0.0). */
2675 if (trueop1
== CONST0_RTX (mode
))
2677 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2679 if (GET_CODE (tem
) == ABS
)
2680 return const_true_rtx
;
2691 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2697 return equal
? const_true_rtx
: const0_rtx
;
2700 return ! equal
? const_true_rtx
: const0_rtx
;
2703 return op0lt
? const_true_rtx
: const0_rtx
;
2706 return op1lt
? const_true_rtx
: const0_rtx
;
2708 return op0ltu
? const_true_rtx
: const0_rtx
;
2710 return op1ltu
? const_true_rtx
: const0_rtx
;
2713 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
2716 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
2718 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
2720 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
2722 return const_true_rtx
;
2730 /* Simplify CODE, an operation with result mode MODE and three operands,
2731 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2732 a constant. Return 0 if no simplifications is possible. */
2735 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
2736 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
2739 unsigned int width
= GET_MODE_BITSIZE (mode
);
2741 /* VOIDmode means "infinite" precision. */
2743 width
= HOST_BITS_PER_WIDE_INT
;
2749 if (GET_CODE (op0
) == CONST_INT
2750 && GET_CODE (op1
) == CONST_INT
2751 && GET_CODE (op2
) == CONST_INT
2752 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
2753 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
2755 /* Extracting a bit-field from a constant */
2756 HOST_WIDE_INT val
= INTVAL (op0
);
2758 if (BITS_BIG_ENDIAN
)
2759 val
>>= (GET_MODE_BITSIZE (op0_mode
)
2760 - INTVAL (op2
) - INTVAL (op1
));
2762 val
>>= INTVAL (op2
);
2764 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
2766 /* First zero-extend. */
2767 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
2768 /* If desired, propagate sign bit. */
2769 if (code
== SIGN_EXTRACT
2770 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
2771 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
2774 /* Clear the bits that don't belong in our mode,
2775 unless they and our sign bit are all one.
2776 So we get either a reasonable negative value or a reasonable
2777 unsigned value for this mode. */
2778 if (width
< HOST_BITS_PER_WIDE_INT
2779 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
2780 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
2781 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2783 return GEN_INT (val
);
2788 if (GET_CODE (op0
) == CONST_INT
)
2789 return op0
!= const0_rtx
? op1
: op2
;
2791 /* Convert a == b ? b : a to "a". */
2792 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
2793 && !HONOR_NANS (mode
)
2794 && rtx_equal_p (XEXP (op0
, 0), op1
)
2795 && rtx_equal_p (XEXP (op0
, 1), op2
))
2797 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
2798 && !HONOR_NANS (mode
)
2799 && rtx_equal_p (XEXP (op0
, 1), op1
)
2800 && rtx_equal_p (XEXP (op0
, 0), op2
))
2802 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
2804 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
2805 ? GET_MODE (XEXP (op0
, 1))
2806 : GET_MODE (XEXP (op0
, 0)));
2808 if (cmp_mode
== VOIDmode
)
2809 cmp_mode
= op0_mode
;
2810 temp
= simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
2811 XEXP (op0
, 0), XEXP (op0
, 1));
2813 /* See if any simplifications were possible. */
2814 if (temp
== const0_rtx
)
2816 else if (temp
== const_true_rtx
)
2821 /* Look for happy constants in op1 and op2. */
2822 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2824 HOST_WIDE_INT t
= INTVAL (op1
);
2825 HOST_WIDE_INT f
= INTVAL (op2
);
2827 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2828 code
= GET_CODE (op0
);
2829 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
2832 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
2840 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
2845 if (GET_MODE (op0
) != mode
2846 || GET_MODE (op1
) != mode
2847 || !VECTOR_MODE_P (mode
))
2849 op2
= avoid_constant_pool_reference (op2
);
2850 if (GET_CODE (op2
) == CONST_INT
)
2852 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2853 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2854 int mask
= (1 << n_elts
) - 1;
2856 if (!(INTVAL (op2
) & mask
))
2858 if ((INTVAL (op2
) & mask
) == mask
)
2861 op0
= avoid_constant_pool_reference (op0
);
2862 op1
= avoid_constant_pool_reference (op1
);
2863 if (GET_CODE (op0
) == CONST_VECTOR
2864 && GET_CODE (op1
) == CONST_VECTOR
)
2866 rtvec v
= rtvec_alloc (n_elts
);
2869 for (i
= 0; i
< n_elts
; i
++)
2870 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
2871 ? CONST_VECTOR_ELT (op0
, i
)
2872 : CONST_VECTOR_ELT (op1
, i
));
2873 return gen_rtx_CONST_VECTOR (mode
, v
);
2885 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2886 Return 0 if no simplifications is possible. */
2888 simplify_subreg (enum machine_mode outermode
, rtx op
,
2889 enum machine_mode innermode
, unsigned int byte
)
2891 /* Little bit of sanity checking. */
2892 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2893 || innermode
== BLKmode
|| outermode
== BLKmode
)
2896 if (GET_MODE (op
) != innermode
2897 && GET_MODE (op
) != VOIDmode
)
2900 if (byte
% GET_MODE_SIZE (outermode
)
2901 || byte
>= GET_MODE_SIZE (innermode
))
2904 if (outermode
== innermode
&& !byte
)
2907 /* Simplify subregs of vector constants. */
2908 if (GET_CODE (op
) == CONST_VECTOR
)
2910 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (innermode
));
2911 const unsigned int offset
= byte
/ elt_size
;
2914 if (GET_MODE_INNER (innermode
) == outermode
)
2916 elt
= CONST_VECTOR_ELT (op
, offset
);
2918 /* ?? We probably don't need this copy_rtx because constants
2919 can be shared. ?? */
2921 return copy_rtx (elt
);
2923 else if (GET_MODE_INNER (innermode
) == GET_MODE_INNER (outermode
)
2924 && GET_MODE_SIZE (innermode
) > GET_MODE_SIZE (outermode
))
2926 return (gen_rtx_CONST_VECTOR
2928 gen_rtvec_v (GET_MODE_NUNITS (outermode
),
2929 &CONST_VECTOR_ELT (op
, offset
))));
2931 else if (GET_MODE_CLASS (outermode
) == MODE_INT
2932 && (GET_MODE_SIZE (outermode
) % elt_size
== 0))
2934 /* This happens when the target register size is smaller then
2935 the vector mode, and we synthesize operations with vectors
2936 of elements that are smaller than the register size. */
2937 HOST_WIDE_INT sum
= 0, high
= 0;
2938 unsigned n_elts
= (GET_MODE_SIZE (outermode
) / elt_size
);
2939 unsigned i
= BYTES_BIG_ENDIAN
? offset
: offset
+ n_elts
- 1;
2940 unsigned step
= BYTES_BIG_ENDIAN
? 1 : -1;
2941 int shift
= BITS_PER_UNIT
* elt_size
;
2942 unsigned HOST_WIDE_INT unit_mask
;
2944 unit_mask
= (unsigned HOST_WIDE_INT
) -1
2945 >> (sizeof (HOST_WIDE_INT
) * BITS_PER_UNIT
- shift
);
2947 for (; n_elts
--; i
+= step
)
2949 elt
= CONST_VECTOR_ELT (op
, i
);
2950 if (GET_CODE (elt
) == CONST_DOUBLE
2951 && GET_MODE_CLASS (GET_MODE (elt
)) == MODE_FLOAT
)
2953 elt
= gen_lowpart_common (int_mode_for_mode (GET_MODE (elt
)),
2958 if (GET_CODE (elt
) != CONST_INT
)
2960 /* Avoid overflow. */
2961 if (high
>> (HOST_BITS_PER_WIDE_INT
- shift
))
2963 high
= high
<< shift
| sum
>> (HOST_BITS_PER_WIDE_INT
- shift
);
2964 sum
= (sum
<< shift
) + (INTVAL (elt
) & unit_mask
);
2966 if (GET_MODE_BITSIZE (outermode
) <= HOST_BITS_PER_WIDE_INT
)
2967 return GEN_INT (trunc_int_for_mode (sum
, outermode
));
2968 else if (GET_MODE_BITSIZE (outermode
) == 2* HOST_BITS_PER_WIDE_INT
)
2969 return immed_double_const (sum
, high
, outermode
);
2973 else if (GET_MODE_CLASS (outermode
) == MODE_INT
2974 && (elt_size
% GET_MODE_SIZE (outermode
) == 0))
2976 enum machine_mode new_mode
2977 = int_mode_for_mode (GET_MODE_INNER (innermode
));
2978 int subbyte
= byte
% elt_size
;
2980 op
= simplify_subreg (new_mode
, op
, innermode
, byte
- subbyte
);
2983 return simplify_subreg (outermode
, op
, new_mode
, subbyte
);
2985 else if (GET_MODE_CLASS (outermode
) == MODE_INT
)
2986 /* This shouldn't happen, but let's not do anything stupid. */
2990 /* Attempt to simplify constant to non-SUBREG expression. */
2991 if (CONSTANT_P (op
))
2994 unsigned HOST_WIDE_INT val
= 0;
2996 if (VECTOR_MODE_P (outermode
))
2998 /* Construct a CONST_VECTOR from individual subregs. */
2999 enum machine_mode submode
= GET_MODE_INNER (outermode
);
3000 int subsize
= GET_MODE_UNIT_SIZE (outermode
);
3001 int i
, elts
= GET_MODE_NUNITS (outermode
);
3002 rtvec v
= rtvec_alloc (elts
);
3005 for (i
= 0; i
< elts
; i
++, byte
+= subsize
)
3007 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
3008 /* ??? It would be nice if we could actually make such subregs
3009 on targets that allow such relocations. */
3010 if (byte
>= GET_MODE_SIZE (innermode
))
3011 elt
= CONST0_RTX (submode
);
3013 elt
= simplify_subreg (submode
, op
, innermode
, byte
);
3016 RTVEC_ELT (v
, i
) = elt
;
3018 return gen_rtx_CONST_VECTOR (outermode
, v
);
3021 /* ??? This code is partly redundant with code below, but can handle
3022 the subregs of floats and similar corner cases.
3023 Later it we should move all simplification code here and rewrite
3024 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
3025 using SIMPLIFY_SUBREG. */
3026 if (subreg_lowpart_offset (outermode
, innermode
) == byte
3027 && GET_CODE (op
) != CONST_VECTOR
)
3029 rtx
new = gen_lowpart_if_possible (outermode
, op
);
3034 /* Similar comment as above apply here. */
3035 if (GET_MODE_SIZE (outermode
) == UNITS_PER_WORD
3036 && GET_MODE_SIZE (innermode
) > UNITS_PER_WORD
3037 && GET_MODE_CLASS (outermode
) == MODE_INT
)
3039 rtx
new = constant_subword (op
,
3040 (byte
/ UNITS_PER_WORD
),
3046 if (GET_MODE_CLASS (outermode
) != MODE_INT
3047 && GET_MODE_CLASS (outermode
) != MODE_CC
)
3049 enum machine_mode new_mode
= int_mode_for_mode (outermode
);
3051 if (new_mode
!= innermode
|| byte
!= 0)
3053 op
= simplify_subreg (new_mode
, op
, innermode
, byte
);
3056 return simplify_subreg (outermode
, op
, new_mode
, 0);
3060 offset
= byte
* BITS_PER_UNIT
;
3061 switch (GET_CODE (op
))
3064 if (GET_MODE (op
) != VOIDmode
)
3067 /* We can't handle this case yet. */
3068 if (GET_MODE_BITSIZE (outermode
) >= HOST_BITS_PER_WIDE_INT
)
3071 part
= offset
>= HOST_BITS_PER_WIDE_INT
;
3072 if ((BITS_PER_WORD
> HOST_BITS_PER_WIDE_INT
3073 && BYTES_BIG_ENDIAN
)
3074 || (BITS_PER_WORD
<= HOST_BITS_PER_WIDE_INT
3075 && WORDS_BIG_ENDIAN
))
3077 val
= part
? CONST_DOUBLE_HIGH (op
) : CONST_DOUBLE_LOW (op
);
3078 offset
%= HOST_BITS_PER_WIDE_INT
;
3080 /* We've already picked the word we want from a double, so
3081 pretend this is actually an integer. */
3082 innermode
= mode_for_size (HOST_BITS_PER_WIDE_INT
, MODE_INT
, 0);
3086 if (GET_CODE (op
) == CONST_INT
)
3089 /* We don't handle synthesizing of non-integral constants yet. */
3090 if (GET_MODE_CLASS (outermode
) != MODE_INT
)
3093 if (BYTES_BIG_ENDIAN
|| WORDS_BIG_ENDIAN
)
3095 if (WORDS_BIG_ENDIAN
)
3096 offset
= (GET_MODE_BITSIZE (innermode
)
3097 - GET_MODE_BITSIZE (outermode
) - offset
);
3098 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
3099 && GET_MODE_SIZE (outermode
) < UNITS_PER_WORD
)
3100 offset
= (offset
+ BITS_PER_WORD
- GET_MODE_BITSIZE (outermode
)
3101 - 2 * (offset
% BITS_PER_WORD
));
3104 if (offset
>= HOST_BITS_PER_WIDE_INT
)
3105 return ((HOST_WIDE_INT
) val
< 0) ? constm1_rtx
: const0_rtx
;
3109 if (GET_MODE_BITSIZE (outermode
) < HOST_BITS_PER_WIDE_INT
)
3110 val
= trunc_int_for_mode (val
, outermode
);
3111 return GEN_INT (val
);
3118 /* Changing mode twice with SUBREG => just change it once,
3119 or not at all if changing back op starting mode. */
3120 if (GET_CODE (op
) == SUBREG
)
3122 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3123 int final_offset
= byte
+ SUBREG_BYTE (op
);
3126 if (outermode
== innermostmode
3127 && byte
== 0 && SUBREG_BYTE (op
) == 0)
3128 return SUBREG_REG (op
);
3130 /* The SUBREG_BYTE represents offset, as if the value were stored
3131 in memory. Irritating exception is paradoxical subreg, where
3132 we define SUBREG_BYTE to be 0. On big endian machines, this
3133 value should be negative. For a moment, undo this exception. */
3134 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
3136 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
3137 if (WORDS_BIG_ENDIAN
)
3138 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3139 if (BYTES_BIG_ENDIAN
)
3140 final_offset
+= difference
% UNITS_PER_WORD
;
3142 if (SUBREG_BYTE (op
) == 0
3143 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
3145 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
3146 if (WORDS_BIG_ENDIAN
)
3147 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3148 if (BYTES_BIG_ENDIAN
)
3149 final_offset
+= difference
% UNITS_PER_WORD
;
3152 /* See whether resulting subreg will be paradoxical. */
3153 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
3155 /* In nonparadoxical subregs we can't handle negative offsets. */
3156 if (final_offset
< 0)
3158 /* Bail out in case resulting subreg would be incorrect. */
3159 if (final_offset
% GET_MODE_SIZE (outermode
)
3160 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
3166 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
3168 /* In paradoxical subreg, see if we are still looking on lower part.
3169 If so, our SUBREG_BYTE will be 0. */
3170 if (WORDS_BIG_ENDIAN
)
3171 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3172 if (BYTES_BIG_ENDIAN
)
3173 offset
+= difference
% UNITS_PER_WORD
;
3174 if (offset
== final_offset
)
3180 /* Recurse for further possible simplifications. */
3181 new = simplify_subreg (outermode
, SUBREG_REG (op
),
3182 GET_MODE (SUBREG_REG (op
)),
3186 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
3189 /* SUBREG of a hard register => just change the register number
3190 and/or mode. If the hard register is not valid in that mode,
3191 suppress this simplification. If the hard register is the stack,
3192 frame, or argument pointer, leave this as a SUBREG. */
3195 && (! REG_FUNCTION_VALUE_P (op
)
3196 || ! rtx_equal_function_value_matters
)
3197 && REGNO (op
) < FIRST_PSEUDO_REGISTER
3198 #ifdef CANNOT_CHANGE_MODE_CLASS
3199 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op
), innermode
, outermode
)
3200 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
3201 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
)
3203 && ((reload_completed
&& !frame_pointer_needed
)
3204 || (REGNO (op
) != FRAME_POINTER_REGNUM
3205 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3206 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
3209 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3210 && REGNO (op
) != ARG_POINTER_REGNUM
3212 && REGNO (op
) != STACK_POINTER_REGNUM
3213 && subreg_offset_representable_p (REGNO (op
), innermode
,
3216 rtx tem
= gen_rtx_SUBREG (outermode
, op
, byte
);
3217 int final_regno
= subreg_hard_regno (tem
, 0);
3219 /* ??? We do allow it if the current REG is not valid for
3220 its mode. This is a kludge to work around how float/complex
3221 arguments are passed on 32-bit SPARC and should be fixed. */
3222 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
3223 || ! HARD_REGNO_MODE_OK (REGNO (op
), innermode
))
3225 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, byte
);
3227 /* Propagate original regno. We don't have any way to specify
3228 the offset inside original regno, so do so only for lowpart.
3229 The information is used only by alias analysis that can not
3230 grog partial register anyway. */
3232 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
3233 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
3238 /* If we have a SUBREG of a register that we are replacing and we are
3239 replacing it with a MEM, make a new MEM and try replacing the
3240 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3241 or if we would be widening it. */
3243 if (GET_CODE (op
) == MEM
3244 && ! mode_dependent_address_p (XEXP (op
, 0))
3245 /* Allow splitting of volatile memory references in case we don't
3246 have instruction to move the whole thing. */
3247 && (! MEM_VOLATILE_P (op
)
3248 || ! have_insn_for (SET
, innermode
))
3249 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
3250 return adjust_address_nv (op
, outermode
, byte
);
3252 /* Handle complex values represented as CONCAT
3253 of real and imaginary part. */
3254 if (GET_CODE (op
) == CONCAT
)
3256 int is_realpart
= byte
< GET_MODE_UNIT_SIZE (innermode
);
3257 rtx part
= is_realpart
? XEXP (op
, 0) : XEXP (op
, 1);
3258 unsigned int final_offset
;
3261 final_offset
= byte
% (GET_MODE_UNIT_SIZE (innermode
));
3262 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
3265 /* We can at least simplify it by referring directly to the relevant part. */
3266 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
3271 /* Make a SUBREG operation or equivalent if it folds. */
3274 simplify_gen_subreg (enum machine_mode outermode
, rtx op
,
3275 enum machine_mode innermode
, unsigned int byte
)
3278 /* Little bit of sanity checking. */
3279 if (innermode
== VOIDmode
|| outermode
== VOIDmode
3280 || innermode
== BLKmode
|| outermode
== BLKmode
)
3283 if (GET_MODE (op
) != innermode
3284 && GET_MODE (op
) != VOIDmode
)
3287 if (byte
% GET_MODE_SIZE (outermode
)
3288 || byte
>= GET_MODE_SIZE (innermode
))
3291 if (GET_CODE (op
) == QUEUED
)
3294 new = simplify_subreg (outermode
, op
, innermode
, byte
);
3298 if (GET_CODE (op
) == SUBREG
|| GET_MODE (op
) == VOIDmode
)
3301 return gen_rtx_SUBREG (outermode
, op
, byte
);
3303 /* Simplify X, an rtx expression.
3305 Return the simplified expression or NULL if no simplifications
3308 This is the preferred entry point into the simplification routines;
3309 however, we still allow passes to call the more specific routines.
3311 Right now GCC has three (yes, three) major bodies of RTL simplification
3312 code that need to be unified.
3314 1. fold_rtx in cse.c. This code uses various CSE specific
3315 information to aid in RTL simplification.
3317 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3318 it uses combine specific information to aid in RTL
3321 3. The routines in this file.
3324 Long term we want to only have one body of simplification code; to
3325 get to that state I recommend the following steps:
3327 1. Pour over fold_rtx & simplify_rtx and move any simplifications
3328 which are not pass dependent state into these routines.
3330 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3331 use this routine whenever possible.
3333 3. Allow for pass dependent state to be provided to these
3334 routines and add simplifications based on the pass dependent
3335 state. Remove code from cse.c & combine.c that becomes
3338 It will take time, but ultimately the compiler will be easier to
3339 maintain and improve. It's totally silly that when we add a
3340 simplification that it needs to be added to 4 places (3 for RTL
3341 simplification and 1 for tree simplification. */
3344 simplify_rtx (rtx x
)
3346 enum rtx_code code
= GET_CODE (x
);
3347 enum machine_mode mode
= GET_MODE (x
);
3350 switch (GET_RTX_CLASS (code
))
3353 return simplify_unary_operation (code
, mode
,
3354 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
3356 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
3357 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
3359 /* Fall through.... */
3362 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
3366 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
3367 XEXP (x
, 0), XEXP (x
, 1),
3371 temp
= simplify_relational_operation (code
,
3372 ((GET_MODE (XEXP (x
, 0))
3374 ? GET_MODE (XEXP (x
, 0))
3375 : GET_MODE (XEXP (x
, 1))),
3376 XEXP (x
, 0), XEXP (x
, 1));
3377 #ifdef FLOAT_STORE_FLAG_VALUE
3378 if (temp
!= 0 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
3380 if (temp
== const0_rtx
)
3381 temp
= CONST0_RTX (mode
);
3383 temp
= CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode
),
3391 return simplify_gen_subreg (mode
, SUBREG_REG (x
),
3392 GET_MODE (SUBREG_REG (x
)),
3394 if (code
== CONSTANT_P_RTX
)
3396 if (CONSTANT_P (XEXP (x
, 0)))
3404 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3405 if (GET_CODE (XEXP (x
, 0)) == HIGH
3406 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))