/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
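
/* For example, HWI_SIGN_EXTEND (-1) yields a high word of -1 while
   HWI_SIGN_EXTEND (1) yields 0, so a (low, high) pair built this way
   behaves like the corresponding double-width signed value.  */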
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
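
/* Note that the truncation matters for the most negative value: e.g.
   negating (const_int -2147483648) in SImode mathematically yields 2^31,
   which does not fit, so gen_int_mode wraps it back into the mode.  */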
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
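
/* E.g. for SImode this accepts only the constant whose mode-masked
   value is 0x80000000, i.e. (const_int -2147483648) on a 64-bit host,
   and rejects every other constant.  */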
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
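
/* E.g. simplify_gen_binary (PLUS, SImode, (const_int 2), (reg R)) puts
   the constant second, giving (plus (reg R) (const_int 2)), and a fully
   constant input such as 2 + 3 folds to (const_int 5) before any new
   rtx is allocated.  */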
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old_rtx) && REGNO (x) == REGNO (old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (trueop)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (trueop)));
	}
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	default:
	  return 0;
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1, hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (trueop));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  return 0;
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (COMPARISON_P (op)
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1), mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == PLUS
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && mode_signbit_p (mode, XEXP (op, 1))
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1), mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && COMPARISON_P (op)
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);
	  break;
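
	  /* E.g. on a target where STORE_FLAG_VALUE is -1, the last
	     transformation above turns (not (ashiftrt X 31)) in SImode
	     into (ge X (const_int 0)).  */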
	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode,
						   XEXP (op, 1), mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode,
					       XEXP (op, 0), mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (LSHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));

	  /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == LSHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (ASHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));
	  break;
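
	  /* E.g. (neg (mult A B)) becomes (mult (neg A) B) above, and when
	     A is a constant the inner NEG folds away entirely.  */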
	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
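
/* E.g. (plus (plus (reg X) (const_int 1)) (const_int 2)) is handled
   here: the inner constant is paired with the outer one and refolded,
   yielding (plus (reg X) (const_int 3)).  */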
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
	  f0 = real_value_truncate (mode, f0);
	  f1 = real_value_truncate (mode, f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

	  value = real_value_truncate (mode, value);
	  return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
	}
    }
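
  /* Note that with -ftrapping-math the Inf + -Inf, Inf - Inf, Inf / Inf
     and Inf * 0 cases above are deliberately left unfolded: folding them
     to NaN at compile time would lose the invalid-operation exception
     that the runtime arithmetic would raise.  */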
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_PLUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }

	  /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == XOR
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (REG_P (xop00) && REG_P (xop10)
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_MINUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
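
	  /* E.g. (minus (reg X) (and (reg X) (reg Y))) becomes
	     (and (reg X) (not (reg Y))) via the transformation above.  */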
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1))
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
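
	  /* E.g. (mult (reg X) (const_int 8)) becomes
	     (ashift (reg X) (const_int 3)) via the power-of-two case.  */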
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Canonicalize XOR of the most significant bit to PLUS.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (PLUS, mode, op0, op1);
	  /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == PLUS
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, XEXP (op0, 1)))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  /* If we are turning off bits already known off in OP0, we need
	     not do an AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	     ((A & N) + B) & M -> (A + B) & M
	     Similarly if (N & M) == 0,
	     ((A | N) + B) & M -> (A + B) & M
	     and for - instead of + and/or ^ instead of |.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ~INTVAL (trueop1)
	      && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	    {
	      rtx pmop[2];
	      int which;

	      pmop[0] = XEXP (op0, 0);
	      pmop[1] = XEXP (op0, 1);

	      for (which = 0; which < 2; which++)
		{
		  tem = pmop[which];
		  switch (GET_CODE (tem))
		    {
		    case AND:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
			     == INTVAL (trueop1))
			pmop[which] = XEXP (tem, 0);
		      break;
		    case IOR:
		    case XOR:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
			pmop[which] = XEXP (tem, 0);
		      break;
		    default:
		      break;
		    }
		}

	      if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
		{
		  tem = simplify_gen_binary (GET_CODE (op0), mode,
					     pmop[0], pmop[1]);
		  return simplify_gen_binary (code, mode, tem, op1);
		}
	    }
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UDIV:
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x/1 is x.  */
	  if (trueop1 == const1_rtx)
	    {
	      /* Handle narrowing UDIV.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      return op0;
	    }
	  /* Convert divide by power of two into shift.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
	  break;
	case DIV:
	  /* Handle floating point and integers separately.  */
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      /* Maybe change 0.0 / x to 0.0.  This transformation isn't
		 safe for modes with NaNs, since 0.0 / 0.0 will then be
		 NaN rather than 0.0.  Nor is it safe for modes with signed
		 zeros, since dividing 0 by a negative number gives -0.0  */
	      if (trueop0 == CONST0_RTX (mode)
		  && !HONOR_NANS (mode)
		  && !HONOR_SIGNED_ZEROS (mode)
		  && ! side_effects_p (op1))
		return op0;
	      /* x/1.0 is x.  */
	      if (trueop1 == CONST1_RTX (mode)
		  && !HONOR_SNANS (mode))
		return op0;

	      if (GET_CODE (trueop1) == CONST_DOUBLE
		  && trueop1 != CONST0_RTX (mode))
		{
		  REAL_VALUE_TYPE d;
		  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

		  /* x/-1.0 is -x.  */
		  if (REAL_VALUES_EQUAL (d, dconstm1)
		      && !HONOR_SNANS (mode))
		    return simplify_gen_unary (NEG, mode, op0, mode);

		  /* Change FP division by a constant into multiplication.
		     Only do this with -funsafe-math-optimizations.  */
		  if (flag_unsafe_math_optimizations
		      && !REAL_VALUES_EQUAL (d, dconst0))
		    {
		      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		      return simplify_gen_binary (MULT, mode, op0, tem);
		    }
		}
	    }
	  else
	    {
	      /* 0/x is 0 (or x&0 if x has side-effects).  */
	      if (trueop0 == const0_rtx)
		return side_effects_p (op1)
		       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		       : const0_rtx;
	      /* x/1 is x.  */
	      if (trueop1 == const1_rtx)
		{
		  /* Handle narrowing DIV.  */
		  rtx x = gen_lowpart_common (mode, op0);
		  if (x)
		    return x;
		  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		    return gen_lowpart_SUBREG (mode, op0);
		  return op0;
		}
	      /* x/-1 is -x.  */
	      if (trueop1 == constm1_rtx)
		{
		  rtx x = gen_lowpart_common (mode, op0);
		  if (!x)
		    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
			? gen_lowpart_SUBREG (mode, op0) : op0;
		  return simplify_gen_unary (NEG, mode, x, mode);
		}
	    }
	  break;
	case UMOD:
	  /* 0%x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x%1 is 0 (or x&0 if x has side-effects).  */
	  if (trueop1 == const1_rtx)
	    return side_effects_p (op0)
		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
		   : const0_rtx;
	  /* Implement modulus by power of two as AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));
	  break;
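
	  /* E.g. (umod (reg X) (const_int 16)) becomes
	     (and (reg X) (const_int 15)) via the power-of-two case.  */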
	case MOD:
	  /* 0%x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
	  if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
	    return side_effects_p (op0)
		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
		   : const0_rtx;
	  break;
	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;
	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);
	      gcc_assert (XVECLEN (trueop1, 0) == 1);
	      gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
							  (trueop1, 0, 0)));
	    }
	  else
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      gcc_assert (GET_CODE (x) == CONST_INT);
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
							   INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;
	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    gcc_assert (VECTOR_MODE_P (mode));
	    gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
			== GET_MODE_SIZE (mode));

	    if (VECTOR_MODE_P (op0_mode))
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (op0_mode));
	    else
	      gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	    if (VECTOR_MODE_P (op1_mode))
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (op1_mode));
	    else
	      gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	  }
	  return 0;

	default:
	  break;
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
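  /* For example (illustrative values only), with width == 8 and a wider
     HOST_WIDE_INT: trueop0 == (const_int 255) gives arg0 == 0xff, and
     because bit 7 is set the sign extension makes arg0s == -1.  The
     signed cases below then see -1 while the unsigned cases see 255.  */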
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;
    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;
    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;
    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;
    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;
    case LSHIFTRT:
    case ASHIFT:
    case ASHIFTRT:
      /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
	 value is in range.  We can't return any old value for out-of-range
	 arguments because either the middle-end (via shift_truncation_mask)
	 or the back-end might be relying on target-specific knowledge.
	 Nor can we rely on shift_truncation_mask, since the shift might
	 not be part of an ashlM3, lshrM3 or ashrM3 instruction.  */
      if (SHIFT_COUNT_TRUNCATED)
	arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
      else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	return 0;

      val = (code == ASHIFT
	     ? ((unsigned HOST_WIDE_INT) arg0) << arg1
	     : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

      /* Sign-extend the result for arithmetic right shifts.  */
      if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (width - arg1);
      break;
    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;
    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
    case COMPARE:
      /* Do nothing here.  */
      return 0;
    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;
    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;
    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;
    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      gcc_unreachable ();
    }
  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
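/* For example (an informal sketch, not an exhaustive trace): given
   (minus (plus (reg A) (const_int 4)) (plus (reg A) (const_int 3))),
   the operand array is flattened to A, -A, 4 and -3 (in some order);
   A cancels against -A, the constants fold to 1, and the result is
   rebuilt as just (const_int 1).  */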
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};
static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);
  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;
	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;
	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;
	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;
	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;
  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;
  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = 0;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2575 /* Reject "simplifications" that just wrap the two
2576 arguments in a CONST. Failure to do so can result
2577 in infinite recursion with simplify_binary_operation
2578 when it calls us to simplify CONST operations. */
2580 && ! (GET_CODE (tem
) == CONST
2581 && GET_CODE (XEXP (tem
, 0)) == ncode
2582 && XEXP (XEXP (tem
, 0), 0) == lhs
2583 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2584 /* Don't allow -x + -1 -> ~x simplifications in the
2585 first pass. This allows us the chance to combine
2586 the -1 with other constants. */
2588 && GET_CODE (tem
) == NOT
2589 && XEXP (tem
, 0) == rhs
))
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;
  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;
  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;
  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }
  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));
  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
	{
	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */
	  if (code == NE)
	    {
	      if (GET_MODE (op0) == cmp_mode)
		return simplify_rtx (op0);
	      else
		return simplify_gen_relational (GET_CODE (op0), mode,
						VOIDmode, XEXP (op0, 0),
						XEXP (op0, 1));
	    }
	  else if (code == EQ)
	    {
	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	      if (new_code != UNKNOWN)
		return simplify_gen_relational (new_code, mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
    }

  return NULL_RTX;
}
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */
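/* E.g. (illustrative of the bound tests below): (gtu X (const_int -1))
   is a contradiction in a sufficiently narrow integer mode, since no
   unsigned value exceeds the all-ones maximum, and folds to const0_rtx;
   likewise (leu X (const_int -1)) is a tautology and folds to
   const_true_rtx.  */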
rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));
  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;
  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTU:
	  case GTU:
	  case LEU:
	  case GEU:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }
      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}
      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      /* Optimize comparisons with upper and lower bounds.  */
      if (SCALAR_INT_MODE_P (mode)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  rtx mmin, mmax;
	  int sign;

	  if (code == GEU || code == LEU || code == GTU || code == LTU)
	    sign = 0;
	  else
	    sign = 1;

	  get_mode_bounds (mode, sign, mode, &mmin, &mmax);

	  tem = 0;
	  switch (code)
	    {
	    case GEU:
	    case GE:
	      /* x >= min is always true.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const_true_rtx;
	      break;

	    case LEU:
	    case LE:
	      /* x <= max is always true.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const_true_rtx;
	      break;

	    case GTU:
	    case GT:
	      /* x > max is always false.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const0_rtx;
	      break;

	    case LTU:
	    case LT:
	      /* x < min is always false.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const0_rtx;
	      break;

	    default:
	      break;
	    }
	  if (tem == const0_rtx
	      || tem == const_true_rtx)
	    return tem;
	}

      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;
3182 /* Convert c ? a : a into "a". */
3183 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
3186 /* Convert a != b ? a : b into "a". */
3187 if (GET_CODE (op0
) == NE
3188 && ! side_effects_p (op0
)
3189 && ! HONOR_NANS (mode
)
3190 && ! HONOR_SIGNED_ZEROS (mode
)
3191 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3192 && rtx_equal_p (XEXP (op0
, 1), op2
))
3193 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3194 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3197 /* Convert a == b ? a : b into "b". */
3198 if (GET_CODE (op0
) == EQ
3199 && ! side_effects_p (op0
)
3200 && ! HONOR_NANS (mode
)
3201 && ! HONOR_SIGNED_ZEROS (mode
)
3202 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3203 && rtx_equal_p (XEXP (op0
, 1), op2
))
3204 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3205 && rtx_equal_p (XEXP (op0
, 1), op1
))))
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
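/* For instance (a hypothetical little-endian walk-through): taking
   (subreg:QI (const_int 0x1234) 1) in HImode unpacks the value into
   the byte array { 0x34, 0x12, ... }, selects the byte at offset 1,
   and repacks it as (const_int 0x12).  The endianness corrections
   below make the same answer come out on big-endian targets.  */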
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
= 0; elem
< num_elem
; elem
++)
3351 rtx el
= elems
[elem
];
3353 /* Vectors are kept in target memory order. (This is probably
3356 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3357 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3359 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3360 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3361 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3362 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3363 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < max_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	  }
	  break;
	case MODE_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
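/* E.g. (an illustrative summary only): on a little-endian target,
   simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) folds to
   (const_int 0x34) via simplify_immed_subreg; SUBREGs of SUBREGs, hard
   registers, memory references, CONCATs and extensions are each handled
   by the cases below.  */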
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op),
			      GET_MODE (SUBREG_REG (op)),
			      final_offset);
      if (newx)
	return newx;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));
  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode
      || (REG_P (op) && REGNO (op) < FIRST_PSEUDO_REGISTER))
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
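/* A typical use (an illustrative sketch): a pass holding some rtx X
   calls this routine and substitutes the result only when it is
   non-NULL, e.g.

     rtx tem = simplify_rtx (x);
     if (tem)
       x = tem;

   leaving X untouched when no simplification applies.  */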
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;
    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))