/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
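
/* Illustrative sketch, not from the original sources: a CONST_INT that is
   to be treated as a double-word value is typically widened into such a
   pair as

     unsigned HOST_WIDE_INT lo = INTVAL (x);
     HOST_WIDE_INT hi = HWI_SIGN_EXTEND (lo);

   so that a negative constant keeps its two's-complement value in the
   (low, high) representation.  */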
static rtx neg_const_int (enum machine_mode, rtx);
static bool mode_signbit_p (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
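
/* Illustrative note, not from the original sources: for a 32-bit mode such
   as SImode this predicate accepts only the constant whose value is
   1 << 31 (only the most significant bit set).  A mode wider than
   HOST_BITS_PER_WIDE_INT is matched through the CONST_DOUBLE branch, where
   the low word must be zero and the high word carries the bit.  */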
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
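
/* Illustrative sketch, not from the original sources: a typical caller
   builds folded RTL with

     rtx tmp = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   which simply returns REG, since (plus x 0) folds in an integer mode,
   while a combination that does not fold comes back as a fresh rtx of
   code CODE.  */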
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
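
/* Illustrative note, not from the original sources: the usual idiom is

     rtx trueop = avoid_constant_pool_reference (op);

   so that an operand of the form (mem (symbol_ref ...)) pointing into the
   constant pool is folded against the CONST_INT or CONST_DOUBLE stored
   there, while every other operand is returned unchanged.  */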
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old_rtx) && REGNO (x) == REGNO (old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
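
/* Illustrative sketch, not from the original sources: a pass that wants to
   know what a condition becomes when a register is known to be zero can ask

     rtx folded = simplify_replace_rtx (cond, reg, const0_rtx);

   every occurrence of REG inside COND is rewritten and the result is
   resimplified; COND itself is returned when nothing changed.  */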
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);
361 if (code
== VEC_DUPLICATE
)
363 if (!VECTOR_MODE_P (mode
))
365 if (GET_MODE (trueop
) != VOIDmode
366 && !VECTOR_MODE_P (GET_MODE (trueop
))
367 && GET_MODE_INNER (mode
) != GET_MODE (trueop
))
369 if (GET_MODE (trueop
) != VOIDmode
370 && VECTOR_MODE_P (GET_MODE (trueop
))
371 && GET_MODE_INNER (mode
) != GET_MODE_INNER (GET_MODE (trueop
)))
373 if (GET_CODE (trueop
) == CONST_INT
|| GET_CODE (trueop
) == CONST_DOUBLE
374 || GET_CODE (trueop
) == CONST_VECTOR
)
376 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
377 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
378 rtvec v
= rtvec_alloc (n_elts
);
381 if (GET_CODE (trueop
) != CONST_VECTOR
)
382 for (i
= 0; i
< n_elts
; i
++)
383 RTVEC_ELT (v
, i
) = trueop
;
386 enum machine_mode inmode
= GET_MODE (trueop
);
387 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
388 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
390 if (in_n_elts
>= n_elts
|| n_elts
% in_n_elts
)
392 for (i
= 0; i
< n_elts
; i
++)
393 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop
, i
% in_n_elts
);
395 return gen_rtx_CONST_VECTOR (mode
, v
);
398 else if (GET_CODE (op
) == CONST
)
399 return simplify_unary_operation (code
, mode
, XEXP (op
, 0), op_mode
);
401 if (VECTOR_MODE_P (mode
) && GET_CODE (trueop
) == CONST_VECTOR
)
403 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
404 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
405 enum machine_mode opmode
= GET_MODE (trueop
);
406 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
407 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
408 rtvec v
= rtvec_alloc (n_elts
);
411 if (op_n_elts
!= n_elts
)
414 for (i
= 0; i
< n_elts
; i
++)
416 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
417 CONST_VECTOR_ELT (trueop
, i
),
418 GET_MODE_INNER (opmode
));
421 RTVEC_ELT (v
, i
) = x
;
423 return gen_rtx_CONST_VECTOR (mode
, v
);
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */
430 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
431 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
433 HOST_WIDE_INT hv
, lv
;
436 if (GET_CODE (trueop
) == CONST_INT
)
437 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
439 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
441 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
442 d
= real_value_truncate (mode
, d
);
443 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
445 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
446 && (GET_CODE (trueop
) == CONST_DOUBLE
447 || GET_CODE (trueop
) == CONST_INT
))
449 HOST_WIDE_INT hv
, lv
;
452 if (GET_CODE (trueop
) == CONST_INT
)
453 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
455 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
457 if (op_mode
== VOIDmode
)
459 /* We don't know how to interpret negative-looking numbers in
460 this case, so don't try to fold those. */
464 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
467 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
469 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
470 d
= real_value_truncate (mode
, d
);
471 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
474 if (GET_CODE (trueop
) == CONST_INT
475 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
477 HOST_WIDE_INT arg0
= INTVAL (trueop
);
491 val
= (arg0
>= 0 ? arg0
: - arg0
);
495 /* Don't use ffs here. Instead, get low order bit and then its
496 number. If arg0 is zero, this will return 0, as desired. */
497 arg0
&= GET_MODE_MASK (mode
);
498 val
= exact_log2 (arg0
& (- arg0
)) + 1;
502 arg0
&= GET_MODE_MASK (mode
);
503 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
506 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
510 arg0
&= GET_MODE_MASK (mode
);
513 /* Even if the value at zero is undefined, we have to come
514 up with some replacement. Seems good enough. */
515 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
516 val
= GET_MODE_BITSIZE (mode
);
519 val
= exact_log2 (arg0
& -arg0
);
523 arg0
&= GET_MODE_MASK (mode
);
526 val
++, arg0
&= arg0
- 1;
530 arg0
&= GET_MODE_MASK (mode
);
533 val
++, arg0
&= arg0
- 1;
542 /* When zero-extending a CONST_INT, we need to know its
544 if (op_mode
== VOIDmode
)
546 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
548 /* If we were really extending the mode,
549 we would have to distinguish between zero-extension
550 and sign-extension. */
551 if (width
!= GET_MODE_BITSIZE (op_mode
))
555 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
556 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
562 if (op_mode
== VOIDmode
)
564 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
566 /* If we were really extending the mode,
567 we would have to distinguish between zero-extension
568 and sign-extension. */
569 if (width
!= GET_MODE_BITSIZE (op_mode
))
573 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
576 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
578 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
579 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
596 val
= trunc_int_for_mode (val
, mode
);
598 return GEN_INT (val
);
601 /* We can do some operations on integer CONST_DOUBLEs. Also allow
602 for a DImode operation on a CONST_INT. */
603 else if (GET_MODE (trueop
) == VOIDmode
604 && width
<= HOST_BITS_PER_WIDE_INT
* 2
605 && (GET_CODE (trueop
) == CONST_DOUBLE
606 || GET_CODE (trueop
) == CONST_INT
))
608 unsigned HOST_WIDE_INT l1
, lv
;
609 HOST_WIDE_INT h1
, hv
;
611 if (GET_CODE (trueop
) == CONST_DOUBLE
)
612 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
614 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
624 neg_double (l1
, h1
, &lv
, &hv
);
629 neg_double (l1
, h1
, &lv
, &hv
);
641 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
644 lv
= exact_log2 (l1
& -l1
) + 1;
650 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
651 - HOST_BITS_PER_WIDE_INT
;
653 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
654 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
655 lv
= GET_MODE_BITSIZE (mode
);
661 lv
= exact_log2 (l1
& -l1
);
663 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
664 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
665 lv
= GET_MODE_BITSIZE (mode
);
688 /* This is just a change-of-mode, so do nothing. */
693 if (op_mode
== VOIDmode
)
696 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
700 lv
= l1
& GET_MODE_MASK (op_mode
);
704 if (op_mode
== VOIDmode
705 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
709 lv
= l1
& GET_MODE_MASK (op_mode
);
710 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
711 && (lv
& ((HOST_WIDE_INT
) 1
712 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
713 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
715 hv
= HWI_SIGN_EXTEND (lv
);
726 return immed_double_const (lv
, hv
, mode
);
729 else if (GET_CODE (trueop
) == CONST_DOUBLE
730 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
732 REAL_VALUE_TYPE d
, t
;
733 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
738 if (HONOR_SNANS (mode
) && real_isnan (&d
))
740 real_sqrt (&t
, mode
, &d
);
744 d
= REAL_VALUE_ABS (d
);
747 d
= REAL_VALUE_NEGATE (d
);
750 d
= real_value_truncate (mode
, d
);
753 /* All this does is change the mode. */
756 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
763 real_to_target (tmp
, &d
, GET_MODE (trueop
));
764 for (i
= 0; i
< 4; i
++)
766 real_from_target (&d
, tmp
, mode
);
771 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
774 else if (GET_CODE (trueop
) == CONST_DOUBLE
775 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
776 && GET_MODE_CLASS (mode
) == MODE_INT
777 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
779 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
780 operators are intentionally left unspecified (to ease implementation
781 by target backends), for consistency, this routine implements the
782 same semantics for constant folding as used by the middle-end. */
784 HOST_WIDE_INT xh
, xl
, th
, tl
;
785 REAL_VALUE_TYPE x
, t
;
786 REAL_VALUE_FROM_CONST_DOUBLE (x
, trueop
);
790 if (REAL_VALUE_ISNAN (x
))
793 /* Test against the signed upper bound. */
794 if (width
> HOST_BITS_PER_WIDE_INT
)
796 th
= ((unsigned HOST_WIDE_INT
) 1
797 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
803 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
805 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
806 if (REAL_VALUES_LESS (t
, x
))
813 /* Test against the signed lower bound. */
814 if (width
> HOST_BITS_PER_WIDE_INT
)
816 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
822 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
824 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
825 if (REAL_VALUES_LESS (x
, t
))
831 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
835 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
838 /* Test against the unsigned upper bound. */
839 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
844 else if (width
>= HOST_BITS_PER_WIDE_INT
)
846 th
= ((unsigned HOST_WIDE_INT
) 1
847 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
853 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
855 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
856 if (REAL_VALUES_LESS (t
, x
))
863 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
869 return immed_double_const (xl
, xh
, mode
);
872 /* This was formerly used only for non-IEEE float.
873 eggert@twinsun.com says it is safe for IEEE also. */
876 enum rtx_code reversed
;
879 /* There are some simplifications we can do even if the operands
884 /* (not (not X)) == X. */
885 if (GET_CODE (op
) == NOT
)
888 /* (not (eq X Y)) == (ne X Y), etc. */
889 if (COMPARISON_P (op
)
890 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
891 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
893 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
894 XEXP (op
, 0), XEXP (op
, 1));
          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);
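
          /* Illustrative note, not from the original sources: both of the
             transformations above follow from the two's-complement identity
             ~Y == -Y - 1; with Y == X - 1 this gives ~(X - 1) == -X, and
             with Y == -X it gives ~(-X) == X - 1.  For X == 5, ~4 == -5
             and ~(-5) == 4.  */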
          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
924 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
925 operands other than 1, but that is not valid. We could do a
926 similar simplification for (not (lshiftrt C X)) where C is
927 just the sign bit, but this doesn't seem common enough to
929 if (GET_CODE (op
) == ASHIFT
930 && XEXP (op
, 0) == const1_rtx
)
932 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
933 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
936 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
937 by reversing the comparison code if valid. */
938 if (STORE_FLAG_VALUE
== -1
940 && (reversed
= reversed_comparison_code (op
, NULL_RTX
))
942 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
943 XEXP (op
, 0), XEXP (op
, 1));
945 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
946 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
947 so we can perform the above simplification. */
949 if (STORE_FLAG_VALUE
== -1
950 && GET_CODE (op
) == ASHIFTRT
951 && GET_CODE (XEXP (op
, 1)) == CONST_INT
952 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
953 return simplify_gen_relational (GE
, mode
, VOIDmode
,
954 XEXP (op
, 0), const0_rtx
);
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));
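
          /* Illustrative note, not from the original sources: the
             signed-zero caveat above is concrete for X == Y == +0.0, where
             (minus X Y) is +0.0 and its negation is -0.0, while
             (minus Y X) is +0.0, so the rewrite would change the sign of
             the zero result.  */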
983 if (GET_CODE (op
) == PLUS
984 && !HONOR_SIGNED_ZEROS (mode
)
985 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
987 /* (neg (plus A C)) is simplified to (minus -C A). */
988 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
989 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
991 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1),
994 return simplify_gen_binary (MINUS
, mode
, temp
,
998 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
999 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1000 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1003 /* (neg (mult A B)) becomes (mult (neg A) B).
1004 This works even for floating-point values. */
1005 if (GET_CODE (op
) == MULT
1006 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1008 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1009 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
1012 /* NEG commutes with ASHIFT since it is multiplication. Only do
1013 this if we can then eliminate the NEG (e.g., if the operand
1015 if (GET_CODE (op
) == ASHIFT
)
1017 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0),
1020 return simplify_gen_binary (ASHIFT
, mode
, temp
,
1024 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1025 C is equal to the width of MODE minus 1. */
1026 if (GET_CODE (op
) == ASHIFTRT
1027 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1028 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
1029 return simplify_gen_binary (LSHIFTRT
, mode
,
1030 XEXP (op
, 0), XEXP (op
, 1));
1032 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1033 C is equal to the width of MODE minus 1. */
1034 if (GET_CODE (op
) == LSHIFTRT
1035 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1036 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
1037 return simplify_gen_binary (ASHIFTRT
, mode
,
1038 XEXP (op
, 0), XEXP (op
, 1));
1043 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1044 becomes just the MINUS if its mode is MODE. This allows
1045 folding switch statements on machines using casesi (such as
1047 if (GET_CODE (op
) == TRUNCATE
1048 && GET_MODE (XEXP (op
, 0)) == mode
1049 && GET_CODE (XEXP (op
, 0)) == MINUS
1050 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1051 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1052 return XEXP (op
, 0);
1054 /* Check for a sign extension of a subreg of a promoted
1055 variable, where the promotion is sign-extended, and the
1056 target mode is the same as the variable's promotion. */
1057 if (GET_CODE (op
) == SUBREG
1058 && SUBREG_PROMOTED_VAR_P (op
)
1059 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1060 && GET_MODE (XEXP (op
, 0)) == mode
)
1061 return XEXP (op
, 0);
1063 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1064 if (! POINTERS_EXTEND_UNSIGNED
1065 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1067 || (GET_CODE (op
) == SUBREG
1068 && REG_P (SUBREG_REG (op
))
1069 && REG_POINTER (SUBREG_REG (op
))
1070 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1071 return convert_memory_address (Pmode
, op
);
1076 /* Check for a zero extension of a subreg of a promoted
1077 variable, where the promotion is zero-extended, and the
1078 target mode is the same as the variable's promotion. */
1079 if (GET_CODE (op
) == SUBREG
1080 && SUBREG_PROMOTED_VAR_P (op
)
1081 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1082 && GET_MODE (XEXP (op
, 0)) == mode
)
1083 return XEXP (op
, 0);
1085 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1086 if (POINTERS_EXTEND_UNSIGNED
> 0
1087 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1089 || (GET_CODE (op
) == SUBREG
1090 && REG_P (SUBREG_REG (op
))
1091 && REG_POINTER (SUBREG_REG (op
))
1092 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1093 return convert_memory_address (Pmode
, op
);
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
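
/* Illustrative sketch, not from the original sources: for an associative
   and commutative CODE this routine pulls constants outward, e.g. a
   request like

     simplify_associative_operation (PLUS, SImode,
                                     gen_rtx_PLUS (SImode, x, GEN_INT (3)),
                                     y);

   is canonicalized towards (plus (plus x y) (const_int 3)), so that later
   folding can combine trailing constants.  */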
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0, trueop1;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif
  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
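
  /* Illustrative note, not from the original sources: TRUEOP0/TRUEOP1 are
     used for recognizing constants (so that a float loaded from the
     constant pool folds like an ordinary CONST_DOUBLE), while OP0/OP1 are
     what gets rebuilt into any new expression that is returned.  */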
1199 if (VECTOR_MODE_P (mode
)
1200 && GET_CODE (trueop0
) == CONST_VECTOR
1201 && GET_CODE (trueop1
) == CONST_VECTOR
)
1203 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1204 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1205 enum machine_mode op0mode
= GET_MODE (trueop0
);
1206 int op0_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op0mode
));
1207 unsigned op0_n_elts
= (GET_MODE_SIZE (op0mode
) / op0_elt_size
);
1208 enum machine_mode op1mode
= GET_MODE (trueop1
);
1209 int op1_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op1mode
));
1210 unsigned op1_n_elts
= (GET_MODE_SIZE (op1mode
) / op1_elt_size
);
1211 rtvec v
= rtvec_alloc (n_elts
);
1214 if (op0_n_elts
!= n_elts
|| op1_n_elts
!= n_elts
)
1217 for (i
= 0; i
< n_elts
; i
++)
1219 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
1220 CONST_VECTOR_ELT (trueop0
, i
),
1221 CONST_VECTOR_ELT (trueop1
, i
));
1224 RTVEC_ELT (v
, i
) = x
;
1227 return gen_rtx_CONST_VECTOR (mode
, v
);
1230 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1231 && GET_CODE (trueop0
) == CONST_DOUBLE
1232 && GET_CODE (trueop1
) == CONST_DOUBLE
1233 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
1244 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
1246 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
1248 for (i
= 0; i
< 4; i
++)
1252 else if (code
== IOR
)
1254 else if (code
== XOR
)
1259 real_from_target (&r
, tmp0
, mode
);
1260 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
1264 REAL_VALUE_TYPE f0
, f1
, value
;
1266 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
1267 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
1268 f0
= real_value_truncate (mode
, f0
);
1269 f1
= real_value_truncate (mode
, f1
);
1271 if (HONOR_SNANS (mode
)
1272 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
1276 && REAL_VALUES_EQUAL (f1
, dconst0
)
1277 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
1280 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
1281 && flag_trapping_math
1282 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
1284 int s0
= REAL_VALUE_NEGATIVE (f0
);
1285 int s1
= REAL_VALUE_NEGATIVE (f1
);
1290 /* Inf + -Inf = NaN plus exception. */
1295 /* Inf - Inf = NaN plus exception. */
1300 /* Inf / Inf = NaN plus exception. */
1307 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
1308 && flag_trapping_math
1309 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
1310 || (REAL_VALUE_ISINF (f1
)
1311 && REAL_VALUES_EQUAL (f0
, dconst0
))))
1312 /* Inf * 0 = NaN plus exception. */
1315 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
1317 value
= real_value_truncate (mode
, value
);
1318 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
1322 /* We can fold some multi-word operations. */
1323 if (GET_MODE_CLASS (mode
) == MODE_INT
1324 && width
== HOST_BITS_PER_WIDE_INT
* 2
1325 && (GET_CODE (trueop0
) == CONST_DOUBLE
1326 || GET_CODE (trueop0
) == CONST_INT
)
1327 && (GET_CODE (trueop1
) == CONST_DOUBLE
1328 || GET_CODE (trueop1
) == CONST_INT
))
1330 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
1331 HOST_WIDE_INT h1
, h2
, hv
, ht
;
1333 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
1334 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
1336 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
1338 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
1339 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
1341 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
1346 /* A - B == A + (-B). */
1347 neg_double (l2
, h2
, &lv
, &hv
);
1350 /* Fall through.... */
1353 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1357 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1361 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
1362 &lv
, &hv
, <
, &ht
))
1367 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
1368 <
, &ht
, &lv
, &hv
))
1373 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
1374 &lv
, &hv
, <
, &ht
))
1379 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
1380 <
, &ht
, &lv
, &hv
))
1385 lv
= l1
& l2
, hv
= h1
& h2
;
1389 lv
= l1
| l2
, hv
= h1
| h2
;
1393 lv
= l1
^ l2
, hv
= h1
^ h2
;
1399 && ((unsigned HOST_WIDE_INT
) l1
1400 < (unsigned HOST_WIDE_INT
) l2
)))
1409 && ((unsigned HOST_WIDE_INT
) l1
1410 > (unsigned HOST_WIDE_INT
) l2
)))
1417 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
1419 && ((unsigned HOST_WIDE_INT
) l1
1420 < (unsigned HOST_WIDE_INT
) l2
)))
1427 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
1429 && ((unsigned HOST_WIDE_INT
) l1
1430 > (unsigned HOST_WIDE_INT
) l2
)))
1436 case LSHIFTRT
: case ASHIFTRT
:
1438 case ROTATE
: case ROTATERT
:
1439 if (SHIFT_COUNT_TRUNCATED
)
1440 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
1442 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
1445 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
1446 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
1448 else if (code
== ASHIFT
)
1449 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
1450 else if (code
== ROTATE
)
1451 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1452 else /* code == ROTATERT */
1453 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1460 return immed_double_const (lv
, hv
, mode
);
1463 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
1464 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
1466 /* Even if we can't compute a constant result,
1467 there are some cases worth simplifying. */
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
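
      /* Illustrative note, not from the original sources: the rewrite above
         is the two's-complement identity -A == ~A + 1; for A == 5,
         ~5 == -6 and -6 + 1 == -5.  */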
1492 /* Handle both-operands-constant cases. We can only add
1493 CONST_INTs to constants since the sum of relocatable symbols
1494 can't be handled by most assemblers. Don't add CONST_INT
1495 to CONST_INT since overflow won't be computed properly if wider
1496 than HOST_BITS_PER_WIDE_INT. */
1498 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1499 && GET_CODE (op1
) == CONST_INT
)
1500 return plus_constant (op0
, INTVAL (op1
));
1501 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1502 && GET_CODE (op0
) == CONST_INT
)
1503 return plus_constant (op1
, INTVAL (op0
));
1505 /* See if this is something like X * C - X or vice versa or
1506 if the multiplication is written as a shift. If so, we can
1507 distribute and make a new multiply, shift, or maybe just
1508 have X (if C is 2 in the example above). But don't make
1509 something more expensive than we had before. */
1511 if (! FLOAT_MODE_P (mode
))
1513 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1514 rtx lhs
= op0
, rhs
= op1
;
1516 if (GET_CODE (lhs
) == NEG
)
1517 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1518 else if (GET_CODE (lhs
) == MULT
1519 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1521 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1523 else if (GET_CODE (lhs
) == ASHIFT
1524 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1525 && INTVAL (XEXP (lhs
, 1)) >= 0
1526 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1528 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1529 lhs
= XEXP (lhs
, 0);
1532 if (GET_CODE (rhs
) == NEG
)
1533 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1534 else if (GET_CODE (rhs
) == MULT
1535 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1537 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1539 else if (GET_CODE (rhs
) == ASHIFT
1540 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1541 && INTVAL (XEXP (rhs
, 1)) >= 0
1542 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1544 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1545 rhs
= XEXP (rhs
, 0);
1548 if (rtx_equal_p (lhs
, rhs
))
1550 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1551 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1552 GEN_INT (coeff0
+ coeff1
));
1553 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1558 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1559 if ((GET_CODE (op1
) == CONST_INT
1560 || GET_CODE (op1
) == CONST_DOUBLE
)
1561 && GET_CODE (op0
) == XOR
1562 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1563 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1564 && mode_signbit_p (mode
, op1
))
1565 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1566 simplify_gen_binary (XOR
, mode
, op1
,
1569 /* If one of the operands is a PLUS or a MINUS, see if we can
1570 simplify this by the associative law.
1571 Don't use the associative law for floating point.
1572 The inaccuracy makes it nonassociative,
1573 and subtle programs can break if operations are associated. */
1575 if (INTEGRAL_MODE_P (mode
)
1576 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1577 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1578 || (GET_CODE (op0
) == CONST
1579 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1580 || (GET_CODE (op1
) == CONST
1581 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1582 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1585 /* Reassociate floating point addition only when the user
1586 specifies unsafe math optimizations. */
1587 if (FLOAT_MODE_P (mode
)
1588 && flag_unsafe_math_optimizations
)
1590 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1598 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1599 using cc0, in which case we want to leave it as a COMPARE
1600 so we can distinguish it from a register-register-copy.
1602 In IEEE floating point, x-0 is not the same as x. */
1604 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1605 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1606 && trueop1
== CONST0_RTX (mode
))
1610 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1611 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1612 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1613 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1615 rtx xop00
= XEXP (op0
, 0);
1616 rtx xop10
= XEXP (op1
, 0);
1619 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1621 if (REG_P (xop00
) && REG_P (xop10
)
1622 && GET_MODE (xop00
) == GET_MODE (xop10
)
1623 && REGNO (xop00
) == REGNO (xop10
)
1624 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1625 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1632 /* We can't assume x-x is 0 even with non-IEEE floating point,
1633 but since it is zero except in very strange circumstances, we
1634 will treat it as zero with -funsafe-math-optimizations. */
1635 if (rtx_equal_p (trueop0
, trueop1
)
1636 && ! side_effects_p (op0
)
1637 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1638 return CONST0_RTX (mode
);
      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);
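
      /* Illustrative note, not from the original sources: the (-1 - a)
         rewrite holds because -1 is the all-ones bit pattern, and
         -1 - A == -(A + 1) == ~A in two's complement; for A == 5,
         -1 - 5 == -6 == ~5.  */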
1651 /* Subtracting 0 has no effect unless the mode has signed zeros
1652 and supports rounding towards -infinity. In such a case,
1654 if (!(HONOR_SIGNED_ZEROS (mode
)
1655 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1656 && trueop1
== CONST0_RTX (mode
))
1659 /* See if this is something like X * C - X or vice versa or
1660 if the multiplication is written as a shift. If so, we can
1661 distribute and make a new multiply, shift, or maybe just
1662 have X (if C is 2 in the example above). But don't make
1663 something more expensive than we had before. */
1665 if (! FLOAT_MODE_P (mode
))
1667 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1668 rtx lhs
= op0
, rhs
= op1
;
1670 if (GET_CODE (lhs
) == NEG
)
1671 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1672 else if (GET_CODE (lhs
) == MULT
1673 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1675 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1677 else if (GET_CODE (lhs
) == ASHIFT
1678 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1679 && INTVAL (XEXP (lhs
, 1)) >= 0
1680 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1682 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1683 lhs
= XEXP (lhs
, 0);
1686 if (GET_CODE (rhs
) == NEG
)
1687 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1688 else if (GET_CODE (rhs
) == MULT
1689 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1691 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1693 else if (GET_CODE (rhs
) == ASHIFT
1694 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1695 && INTVAL (XEXP (rhs
, 1)) >= 0
1696 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1698 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1699 rhs
= XEXP (rhs
, 0);
1702 if (rtx_equal_p (lhs
, rhs
))
1704 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1705 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1706 GEN_INT (coeff0
- coeff1
));
1707 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1712 /* (a - (-b)) -> (a + b). True even for IEEE. */
1713 if (GET_CODE (op1
) == NEG
)
1714 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1716 /* (-x - c) may be simplified as (-c - x). */
1717 if (GET_CODE (op0
) == NEG
1718 && (GET_CODE (op1
) == CONST_INT
1719 || GET_CODE (op1
) == CONST_DOUBLE
))
1721 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1723 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1726 /* If one of the operands is a PLUS or a MINUS, see if we can
1727 simplify this by the associative law.
1728 Don't use the associative law for floating point.
1729 The inaccuracy makes it nonassociative,
1730 and subtle programs can break if operations are associated. */
1732 if (INTEGRAL_MODE_P (mode
)
1733 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1734 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1735 || (GET_CODE (op0
) == CONST
1736 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1737 || (GET_CODE (op1
) == CONST
1738 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1739 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1742 /* Don't let a relocatable value get a negative coeff. */
1743 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1744 return simplify_gen_binary (PLUS
, mode
,
1746 neg_const_int (mode
, op1
));
1748 /* (x - (x & y)) -> (x & ~y) */
1749 if (GET_CODE (op1
) == AND
)
1751 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1753 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1754 GET_MODE (XEXP (op1
, 1)));
1755 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1757 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1759 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1760 GET_MODE (XEXP (op1
, 0)));
1761 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1767 if (trueop1
== constm1_rtx
)
1768 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1770 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1771 x is NaN, since x * 0 is then also NaN. Nor is it valid
1772 when the mode has signed zeros, since multiplying a negative
1773 number by 0 will give -0, not 0. */
1774 if (!HONOR_NANS (mode
)
1775 && !HONOR_SIGNED_ZEROS (mode
)
1776 && trueop1
== CONST0_RTX (mode
)
1777 && ! side_effects_p (op0
))
1780 /* In IEEE floating point, x*1 is not equivalent to x for
1782 if (!HONOR_SNANS (mode
)
1783 && trueop1
== CONST1_RTX (mode
))
1786 /* Convert multiply by constant power of two into shift unless
1787 we are still generating RTL. This test is a kludge. */
1788 if (GET_CODE (trueop1
) == CONST_INT
1789 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1790 /* If the mode is larger than the host word size, and the
1791 uppermost bit is set, then this isn't a power of two due
1792 to implicit sign extension. */
1793 && (width
<= HOST_BITS_PER_WIDE_INT
1794 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1795 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1797 /* x*2 is x+x and x*(-1) is -x */
1798 if (GET_CODE (trueop1
) == CONST_DOUBLE
1799 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1800 && GET_MODE (op0
) == mode
)
1803 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1805 if (REAL_VALUES_EQUAL (d
, dconst2
))
1806 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1808 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1809 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1812 /* Reassociate multiplication, but for floating point MULTs
1813 only when the user specifies unsafe math optimizations. */
1814 if (! FLOAT_MODE_P (mode
)
1815 || flag_unsafe_math_optimizations
)
1817 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1824 if (trueop1
== const0_rtx
)
1826 if (GET_CODE (trueop1
) == CONST_INT
1827 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1828 == GET_MODE_MASK (mode
)))
1830 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1832 /* A | (~A) -> -1 */
1833 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1834 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1835 && ! side_effects_p (op0
)
1836 && GET_MODE_CLASS (mode
) != MODE_CC
)
1838 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1844 if (trueop1
== const0_rtx
)
1846 if (GET_CODE (trueop1
) == CONST_INT
1847 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1848 == GET_MODE_MASK (mode
)))
1849 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1850 if (trueop0
== trueop1
1851 && ! side_effects_p (op0
)
1852 && GET_MODE_CLASS (mode
) != MODE_CC
)
1855 /* Canonicalize XOR of the most significant bit to PLUS. */
1856 if ((GET_CODE (op1
) == CONST_INT
1857 || GET_CODE (op1
) == CONST_DOUBLE
)
1858 && mode_signbit_p (mode
, op1
))
1859 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
1860 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1861 if ((GET_CODE (op1
) == CONST_INT
1862 || GET_CODE (op1
) == CONST_DOUBLE
)
1863 && GET_CODE (op0
) == PLUS
1864 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1865 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1866 && mode_signbit_p (mode
, XEXP (op0
, 1)))
1867 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1868 simplify_gen_binary (XOR
, mode
, op1
,
1871 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1877 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1879 /* If we are turning off bits already known off in OP0, we need
1881 if (GET_CODE (trueop1
) == CONST_INT
1882 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1883 && (nonzero_bits (trueop0
, mode
) & ~INTVAL (trueop1
)) == 0)
1885 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1886 && GET_MODE_CLASS (mode
) != MODE_CC
)
1889 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1890 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1891 && ! side_effects_p (op0
)
1892 && GET_MODE_CLASS (mode
) != MODE_CC
)
1894 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1895 ((A & N) + B) & M -> (A + B) & M
1896 Similarly if (N & M) == 0,
1897 ((A | N) + B) & M -> (A + B) & M
1898 and for - instead of + and/or ^ instead of |. */
1899 if (GET_CODE (trueop1
) == CONST_INT
1900 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1901 && ~INTVAL (trueop1
)
1902 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
1903 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
1908 pmop
[0] = XEXP (op0
, 0);
1909 pmop
[1] = XEXP (op0
, 1);
1911 for (which
= 0; which
< 2; which
++)
1914 switch (GET_CODE (tem
))
1917 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
1918 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
1919 == INTVAL (trueop1
))
1920 pmop
[which
] = XEXP (tem
, 0);
1924 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
1925 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
1926 pmop
[which
] = XEXP (tem
, 0);
1933 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
1935 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
1937 return simplify_gen_binary (code
, mode
, tem
, op1
);
1940 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1946 /* 0/x is 0 (or x&0 if x has side-effects). */
1947 if (trueop0
== const0_rtx
)
1948 return side_effects_p (op1
)
1949 ? simplify_gen_binary (AND
, mode
, op1
, const0_rtx
)
1952 if (trueop1
== const1_rtx
)
1954 /* Handle narrowing UDIV. */
1955 rtx x
= gen_lowpart_common (mode
, op0
);
1958 if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1959 return gen_lowpart_SUBREG (mode
, op0
);
1962 /* Convert divide by power of two into shift. */
1963 if (GET_CODE (trueop1
) == CONST_INT
1964 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1965 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (arg1
));
1969 /* Handle floating point and integers separately. */
1970 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1972 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1973 safe for modes with NaNs, since 0.0 / 0.0 will then be
1974 NaN rather than 0.0. Nor is it safe for modes with signed
1975 zeros, since dividing 0 by a negative number gives -0.0 */
1976 if (trueop0
== CONST0_RTX (mode
)
1977 && !HONOR_NANS (mode
)
1978 && !HONOR_SIGNED_ZEROS (mode
)
1979 && ! side_effects_p (op1
))
1982 if (trueop1
== CONST1_RTX (mode
)
1983 && !HONOR_SNANS (mode
))
1986 if (GET_CODE (trueop1
) == CONST_DOUBLE
1987 && trueop1
!= CONST0_RTX (mode
))
1990 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1993 if (REAL_VALUES_EQUAL (d
, dconstm1
)
1994 && !HONOR_SNANS (mode
))
1995 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1997 /* Change FP division by a constant into multiplication.
1998 Only do this with -funsafe-math-optimizations. */
1999 if (flag_unsafe_math_optimizations
2000 && !REAL_VALUES_EQUAL (d
, dconst0
))
2002 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
2003 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
2004 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
2010 /* 0/x is 0 (or x&0 if x has side-effects). */
2011 if (trueop0
== const0_rtx
)
2012 return side_effects_p (op1
)
2013 ? simplify_gen_binary (AND
, mode
, op1
, const0_rtx
)
2016 if (trueop1
== const1_rtx
)
2018 /* Handle narrowing DIV. */
2019 rtx x
= gen_lowpart_common (mode
, op0
);
2022 if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
2023 return gen_lowpart_SUBREG (mode
, op0
);
2027 if (trueop1
== constm1_rtx
)
2029 rtx x
= gen_lowpart_common (mode
, op0
);
2031 x
= (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
2032 ? gen_lowpart_SUBREG (mode
, op0
) : op0
;
2033 return simplify_gen_unary (NEG
, mode
, x
, mode
);
2039 /* 0%x is 0 (or x&0 if x has side-effects). */
2040 if (trueop0
== const0_rtx
)
2041 return side_effects_p (op1
)
2042 ? simplify_gen_binary (AND
, mode
, op1
, const0_rtx
)
2044 /* x%1 is 0 (of x&0 if x has side-effects). */
2045 if (trueop1
== const1_rtx
)
2046 return side_effects_p (op0
)
2047 ? simplify_gen_binary (AND
, mode
, op0
, const0_rtx
)
2049 /* Implement modulus by power of two as AND. */
2050 if (GET_CODE (trueop1
) == CONST_INT
2051 && exact_log2 (INTVAL (trueop1
)) > 0)
2052 return simplify_gen_binary (AND
, mode
, op0
,
2053 GEN_INT (INTVAL (op1
) - 1));
2057 /* 0%x is 0 (or x&0 if x has side-effects). */
2058 if (trueop0
== const0_rtx
)
2059 return side_effects_p (op1
)
2060 ? simplify_gen_binary (AND
, mode
, op1
, const0_rtx
)
2062 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2063 if (trueop1
== const1_rtx
|| trueop1
== constm1_rtx
)
2064 return side_effects_p (op0
)
2065 ? simplify_gen_binary (AND
, mode
, op0
, const0_rtx
)
2072 /* Rotating ~0 always results in ~0. */
2073 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
2074 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2075 && ! side_effects_p (op1
))
2078 /* Fall through.... */
2082 if (trueop1
== const0_rtx
)
2084 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
2089 if (width
<= HOST_BITS_PER_WIDE_INT
2090 && GET_CODE (trueop1
) == CONST_INT
2091 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2092 && ! side_effects_p (op0
))
2094 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2096 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2102 if (width
<= HOST_BITS_PER_WIDE_INT
2103 && GET_CODE (trueop1
) == CONST_INT
2104 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2105 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2106 && ! side_effects_p (op0
))
2108 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2110 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2116 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
2118 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2120 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2126 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2128 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2130 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2139 /* ??? There are simplifications that can be done. */
2143 if (!VECTOR_MODE_P (mode
))
2145 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
2147 != GET_MODE_INNER (GET_MODE (trueop0
)))
2148 || GET_CODE (trueop1
) != PARALLEL
2149 || XVECLEN (trueop1
, 0) != 1
2150 || GET_CODE (XVECEXP (trueop1
, 0, 0)) != CONST_INT
)
2153 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2154 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP (trueop1
, 0, 0)));
2158 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
2159 || (GET_MODE_INNER (mode
)
2160 != GET_MODE_INNER (GET_MODE (trueop0
)))
2161 || GET_CODE (trueop1
) != PARALLEL
)
2164 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2166 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2167 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2168 rtvec v
= rtvec_alloc (n_elts
);
2171 if (XVECLEN (trueop1
, 0) != (int) n_elts
)
2173 for (i
= 0; i
< n_elts
; i
++)
2175 rtx x
= XVECEXP (trueop1
, 0, i
);
2177 if (GET_CODE (x
) != CONST_INT
)
2179 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, INTVAL (x
));
2182 return gen_rtx_CONST_VECTOR (mode
, v
);
    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	if (!VECTOR_MODE_P (mode)
	    || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		!= GET_MODE_SIZE (mode)))
	  abort ();

	if ((VECTOR_MODE_P (op0_mode)
	     && (GET_MODE_INNER (mode)
		 != GET_MODE_INNER (op0_mode)))
	    || (!VECTOR_MODE_P (op0_mode)
		&& GET_MODE_INNER (mode) != op0_mode))
	  abort ();

	if ((VECTOR_MODE_P (op1_mode)
	     && (GET_MODE_INNER (mode)
		 != GET_MODE_INNER (op1_mode)))
	    || (!VECTOR_MODE_P (op1_mode)
		&& GET_MODE_INNER (mode) != op1_mode))
	  abort ();

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || GET_CODE (trueop0) == CONST_INT
	     || GET_CODE (trueop0) == CONST_DOUBLE)
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| GET_CODE (trueop1) == CONST_INT
		|| GET_CODE (trueop1) == CONST_DOUBLE))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned in_n_elts = 1;
	    unsigned i;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
      }
      break;
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
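
/* Illustration only (not part of the original sources): a minimal sketch of
   the ARG0/ARG0S widening performed above, assuming WIDTH is the bit width
   of MODE and is smaller than HOST_BITS_PER_WIDE_INT.  For WIDTH == 8 and an
   input of -1, the zero-extended form is 255 while the sign-extended form is
   -1, which is why UDIV folds 255 / 2 to 127 but DIV folds -1 / 2 to 0.  */
#if 0
static void
widen_example (HOST_WIDE_INT x, unsigned int width,
	       unsigned HOST_WIDE_INT *uns, HOST_WIDE_INT *sgn)
{
  /* Zero-extend X from WIDTH bits.  */
  *uns = x & (((unsigned HOST_WIDE_INT) 1 << width) - 1);
  /* Sign-extend the same WIDTH-bit value.  */
  *sgn = *uns;
  if (*sgn & ((HOST_WIDE_INT) 1 << (width - 1)))
    *sgn |= ((HOST_WIDE_INT) (-1) << width);
}
#endif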
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
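
/* Illustration only (not from the original sources): given the expression
   (plus:SI (plus:SI (reg A) (const_int 1)) (neg:SI (reg A))), the expansion
   loop below collects the operands roughly as { A, + }, { 1, + }, { A, - };
   the pairwise pass then folds A - A to 0 and 0 + 1 to 1, so the whole
   expression reduces to (const_int 1).  The exact result depends on the
   mode and on the FORCE argument.  */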
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
#ifdef FLOAT_STORE_FLAG_VALUE
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
	  else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE val;
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	    }
	}
#endif
      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
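
/* Illustration only (not from the original sources): the canonicalization
   above moves constants to the second operand, so a comparison written as
   (gtu:SI (const_int 4) (reg X)) is rewritten as
   (ltu:SI (reg X) (const_int 4)), with the condition code swapped by
   swap_condition before any further simplification is attempted.  */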
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
	{
	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */
	  if (code == NE)
	    {
	      if (GET_MODE (op0) == cmp_mode)
		return simplify_rtx (op0);
	      else
		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	  else if (code == EQ)
	    {
	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	      if (new_code != UNKNOWN)
		return simplify_gen_relational (new_code, mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
    }

  return NULL_RTX;
}
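
/* Illustration only (not from the original sources): with the rules above,
   (ne (eq (reg X) (const_int 0)) (const_int 0)) simplifies to the inner
   comparison (eq (reg X) (const_int 0)), while
   (eq (eq (reg X) (const_int 0)) (const_int 0)) becomes the reversed
   comparison (ne (reg X) (const_int 0)), provided the comparison can be
   reversed safely.  */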
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */
2808 simplify_const_relational_operation (enum rtx_code code
,
2809 enum machine_mode mode
,
2812 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
2817 if (mode
== VOIDmode
2818 && (GET_MODE (op0
) != VOIDmode
2819 || GET_MODE (op1
) != VOIDmode
))
2822 /* If op0 is a compare, extract the comparison arguments from it. */
2823 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
2824 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
2826 /* We can't simplify MODE_CC values since we don't know what the
2827 actual comparison is. */
2828 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
2831 /* Make sure the constant is second. */
2832 if (swap_commutative_operands_p (op0
, op1
))
2834 tem
= op0
, op0
= op1
, op1
= tem
;
2835 code
= swap_condition (code
);
2838 trueop0
= avoid_constant_pool_reference (op0
);
2839 trueop1
= avoid_constant_pool_reference (op1
);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */
2852 if (!flag_wrapv
&& INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
2853 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
2854 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
2855 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
2856 /* We cannot do this for == or != if tem is a nonzero address. */
2857 && ((code
!= EQ
&& code
!= NE
) || ! nonzero_address_p (tem
))
2858 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
2859 return simplify_const_relational_operation (signed_condition (code
),
2860 mode
, tem
, const0_rtx
);
2862 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
2863 return const_true_rtx
;
2865 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
2868 /* For modes without NaNs, if the two operands are equal, we know the
2869 result except if they have side-effects. */
2870 if (! HONOR_NANS (GET_MODE (trueop0
))
2871 && rtx_equal_p (trueop0
, trueop1
)
2872 && ! side_effects_p (trueop0
))
2873 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
2875 /* If the operands are floating-point constants, see if we can fold
2877 else if (GET_CODE (trueop0
) == CONST_DOUBLE
2878 && GET_CODE (trueop1
) == CONST_DOUBLE
2879 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
2881 REAL_VALUE_TYPE d0
, d1
;
2883 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
2884 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
2886 /* Comparisons are unordered iff at least one of the values is NaN. */
2887 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
2897 return const_true_rtx
;
2910 equal
= REAL_VALUES_EQUAL (d0
, d1
);
2911 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
2912 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
2915 /* Otherwise, see if the operands are both integers. */
2916 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
2917 && (GET_CODE (trueop0
) == CONST_DOUBLE
2918 || GET_CODE (trueop0
) == CONST_INT
)
2919 && (GET_CODE (trueop1
) == CONST_DOUBLE
2920 || GET_CODE (trueop1
) == CONST_INT
))
2922 int width
= GET_MODE_BITSIZE (mode
);
2923 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
2924 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
2926 /* Get the two words comprising each integer constant. */
2927 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
2929 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
2930 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
2934 l0u
= l0s
= INTVAL (trueop0
);
2935 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
2938 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
2940 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
2941 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
2945 l1u
= l1s
= INTVAL (trueop1
);
2946 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
2949 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2950 we have to sign or zero-extend the values. */
2951 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
2953 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2954 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2956 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2957 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2959 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2960 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2962 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
2963 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
2965 equal
= (h0u
== h1u
&& l0u
== l1u
);
2966 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
2967 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
2968 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
2969 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
2972 /* Otherwise, there are some code-specific tests we can make. */
2975 /* Optimize comparisons with upper and lower bounds. */
2976 if (SCALAR_INT_MODE_P (mode
)
2977 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
2990 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
2997 /* x >= min is always true. */
2998 if (rtx_equal_p (trueop1
, mmin
))
2999 tem
= const_true_rtx
;
3005 /* x <= max is always true. */
3006 if (rtx_equal_p (trueop1
, mmax
))
3007 tem
= const_true_rtx
;
3012 /* x > max is always false. */
3013 if (rtx_equal_p (trueop1
, mmax
))
3019 /* x < min is always false. */
3020 if (rtx_equal_p (trueop1
, mmin
))
3027 if (tem
== const0_rtx
3028 || tem
== const_true_rtx
)
3035 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3040 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3041 return const_true_rtx
;
3045 /* Optimize abs(x) < 0.0. */
3046 if (trueop1
== CONST0_RTX (mode
) && !HONOR_SNANS (mode
))
3048 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3050 if (GET_CODE (tem
) == ABS
)
3056 /* Optimize abs(x) >= 0.0. */
3057 if (trueop1
== CONST0_RTX (mode
) && !HONOR_NANS (mode
))
3059 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3061 if (GET_CODE (tem
) == ABS
)
3062 return const_true_rtx
;
3067 /* Optimize ! (abs(x) < 0.0). */
3068 if (trueop1
== CONST0_RTX (mode
))
3070 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3072 if (GET_CODE (tem
) == ABS
)
3073 return const_true_rtx
;
3084 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3090 return equal
? const_true_rtx
: const0_rtx
;
3093 return ! equal
? const_true_rtx
: const0_rtx
;
3096 return op0lt
? const_true_rtx
: const0_rtx
;
3099 return op1lt
? const_true_rtx
: const0_rtx
;
3101 return op0ltu
? const_true_rtx
: const0_rtx
;
3103 return op1ltu
? const_true_rtx
: const0_rtx
;
3106 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
3109 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
3111 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
3113 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
3115 return const_true_rtx
;
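
/* Illustration only (not from the original sources): examples of the
   constant-comparison folds above.  (geu:SI (reg X) (const_int 0)) folds to
   const_true_rtx because no unsigned value is below the lower bound, while a
   comparison of two CONST_INTs such as (ltu (const_int -1) (const_int 0)) is
   decided directly from the EQUAL/OP0LT/OP0LTU flags computed earlier, here
   yielding const0_rtx since -1 is the largest unsigned value.  */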
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
3128 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
3129 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
3132 unsigned int width
= GET_MODE_BITSIZE (mode
);
3134 /* VOIDmode means "infinite" precision. */
3136 width
= HOST_BITS_PER_WIDE_INT
;
3142 if (GET_CODE (op0
) == CONST_INT
3143 && GET_CODE (op1
) == CONST_INT
3144 && GET_CODE (op2
) == CONST_INT
3145 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
3146 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
3148 /* Extracting a bit-field from a constant */
3149 HOST_WIDE_INT val
= INTVAL (op0
);
3151 if (BITS_BIG_ENDIAN
)
3152 val
>>= (GET_MODE_BITSIZE (op0_mode
)
3153 - INTVAL (op2
) - INTVAL (op1
));
3155 val
>>= INTVAL (op2
);
3157 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
3159 /* First zero-extend. */
3160 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
3161 /* If desired, propagate sign bit. */
3162 if (code
== SIGN_EXTRACT
3163 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
3164 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
3167 /* Clear the bits that don't belong in our mode,
3168 unless they and our sign bit are all one.
3169 So we get either a reasonable negative value or a reasonable
3170 unsigned value for this mode. */
3171 if (width
< HOST_BITS_PER_WIDE_INT
3172 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
3173 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
3174 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3176 return GEN_INT (val
);
3181 if (GET_CODE (op0
) == CONST_INT
)
3182 return op0
!= const0_rtx
? op1
: op2
;
3184 /* Convert c ? a : a into "a". */
3185 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
3188 /* Convert a != b ? a : b into "a". */
3189 if (GET_CODE (op0
) == NE
3190 && ! side_effects_p (op0
)
3191 && ! HONOR_NANS (mode
)
3192 && ! HONOR_SIGNED_ZEROS (mode
)
3193 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3194 && rtx_equal_p (XEXP (op0
, 1), op2
))
3195 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3196 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3199 /* Convert a == b ? a : b into "b". */
3200 if (GET_CODE (op0
) == EQ
3201 && ! side_effects_p (op0
)
3202 && ! HONOR_NANS (mode
)
3203 && ! HONOR_SIGNED_ZEROS (mode
)
3204 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3205 && rtx_equal_p (XEXP (op0
, 1), op2
))
3206 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3207 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3210 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
3212 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
3213 ? GET_MODE (XEXP (op0
, 1))
3214 : GET_MODE (XEXP (op0
, 0)));
3217 /* Look for happy constants in op1 and op2. */
3218 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
3220 HOST_WIDE_INT t
= INTVAL (op1
);
3221 HOST_WIDE_INT f
= INTVAL (op2
);
3223 if (t
== STORE_FLAG_VALUE
&& f
== 0)
3224 code
= GET_CODE (op0
);
3225 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
3228 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
3236 return simplify_gen_relational (code
, mode
, cmp_mode
,
3237 XEXP (op0
, 0), XEXP (op0
, 1));
3240 if (cmp_mode
== VOIDmode
)
3241 cmp_mode
= op0_mode
;
3242 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
3243 cmp_mode
, XEXP (op0
, 0),
3246 /* See if any simplifications were possible. */
3249 if (GET_CODE (temp
) == CONST_INT
)
3250 return temp
== const0_rtx
? op2
: op1
;
3252 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
3258 if (GET_MODE (op0
) != mode
3259 || GET_MODE (op1
) != mode
3260 || !VECTOR_MODE_P (mode
))
3262 op2
= avoid_constant_pool_reference (op2
);
3263 if (GET_CODE (op2
) == CONST_INT
)
3265 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3266 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3267 int mask
= (1 << n_elts
) - 1;
3269 if (!(INTVAL (op2
) & mask
))
3271 if ((INTVAL (op2
) & mask
) == mask
)
3274 op0
= avoid_constant_pool_reference (op0
);
3275 op1
= avoid_constant_pool_reference (op1
);
3276 if (GET_CODE (op0
) == CONST_VECTOR
3277 && GET_CODE (op1
) == CONST_VECTOR
)
3279 rtvec v
= rtvec_alloc (n_elts
);
3282 for (i
= 0; i
< n_elts
; i
++)
3283 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
3284 ? CONST_VECTOR_ELT (op0
, i
)
3285 : CONST_VECTOR_ELT (op1
, i
));
3286 return gen_rtx_CONST_VECTOR (mode
, v
);
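
/* Illustration only (not from the original sources): for the ternary cases
   handled above, (if_then_else (const_int 1) A B) folds to A,
   (if_then_else C A A) folds to A whenever C has no side effects, and a
   VEC_MERGE of two CONST_VECTORs under a constant mask is folded element by
   element into a new CONST_VECTOR.  */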
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
3306 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
3307 enum machine_mode innermode
, unsigned int byte
)
3309 /* We support up to 512-bit values (for V8DFmode). */
3313 value_mask
= (1 << value_bit
) - 1
3315 unsigned char value
[max_bitsize
/ value_bit
];
3324 rtvec result_v
= NULL
;
3325 enum mode_class outer_class
;
3326 enum machine_mode outer_submode
;
3328 /* Some ports misuse CCmode. */
3329 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
3332 /* Unpack the value. */
3334 if (GET_CODE (op
) == CONST_VECTOR
)
3336 num_elem
= CONST_VECTOR_NUNITS (op
);
3337 elems
= &CONST_VECTOR_ELT (op
, 0);
3338 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
3344 elem_bitsize
= max_bitsize
;
3347 if (BITS_PER_UNIT
% value_bit
!= 0)
3348 abort (); /* Too complicated; reducing value_bit may help. */
3349 if (elem_bitsize
% BITS_PER_UNIT
!= 0)
3350 abort (); /* I don't know how to handle endianness of sub-units. */
3352 for (elem
= 0; elem
< num_elem
; elem
++)
3355 rtx el
= elems
[elem
];
3357 /* Vectors are kept in target memory order. (This is probably
3360 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3361 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3363 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3364 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3365 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3366 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3367 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3370 switch (GET_CODE (el
))
3374 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3376 *vp
++ = INTVAL (el
) >> i
;
3377 /* CONST_INTs are always logically sign-extended. */
3378 for (; i
< elem_bitsize
; i
+= value_bit
)
3379 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
3383 if (GET_MODE (el
) == VOIDmode
)
3385 /* If this triggers, someone should have generated a
3386 CONST_INT instead. */
3387 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
3390 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
3391 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
3392 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
3395 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
3398 /* It shouldn't matter what's done here, so fill it with
3400 for (; i
< max_bitsize
; i
+= value_bit
)
3403 else if (GET_MODE_CLASS (GET_MODE (el
)) == MODE_FLOAT
)
3405 long tmp
[max_bitsize
/ 32];
3406 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
3408 if (bitsize
> elem_bitsize
)
3410 if (bitsize
% value_bit
!= 0)
3413 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
3416 /* real_to_target produces its result in words affected by
3417 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3418 and use WORDS_BIG_ENDIAN instead; see the documentation
3419 of SUBREG in rtl.texi. */
3420 for (i
= 0; i
< bitsize
; i
+= value_bit
)
3423 if (WORDS_BIG_ENDIAN
)
3424 ibase
= bitsize
- 1 - i
;
3427 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
3430 /* It shouldn't matter what's done here, so fill it with
3432 for (; i
< elem_bitsize
; i
+= value_bit
)
3444 /* Now, pick the right byte to start with. */
3445 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3446 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3447 will already have offset 0. */
3448 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
3450 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
3452 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3453 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3454 byte
= (subword_byte
% UNITS_PER_WORD
3455 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3458 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3459 so if it's become negative it will instead be very large.) */
3460 if (byte
>= GET_MODE_SIZE (innermode
))
3463 /* Convert from bytes to chunks of size value_bit. */
3464 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
3466 /* Re-pack the value. */
3468 if (VECTOR_MODE_P (outermode
))
3470 num_elem
= GET_MODE_NUNITS (outermode
);
3471 result_v
= rtvec_alloc (num_elem
);
3472 elems
= &RTVEC_ELT (result_v
, 0);
3473 outer_submode
= GET_MODE_INNER (outermode
);
3479 outer_submode
= outermode
;
3482 outer_class
= GET_MODE_CLASS (outer_submode
);
3483 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
3485 if (elem_bitsize
% value_bit
!= 0)
3487 if (elem_bitsize
+ value_start
* value_bit
> max_bitsize
)
3490 for (elem
= 0; elem
< num_elem
; elem
++)
3494 /* Vectors are stored in target memory order. (This is probably
3497 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3498 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3500 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3501 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3502 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3503 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3504 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3507 switch (outer_class
)
3510 case MODE_PARTIAL_INT
:
3512 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
3515 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3517 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
3518 for (; i
< elem_bitsize
; i
+= value_bit
)
3519 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
3520 << (i
- HOST_BITS_PER_WIDE_INT
));
3522 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3524 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
3525 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
3527 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
3534 long tmp
[max_bitsize
/ 32];
3536 /* real_from_target wants its input in words affected by
3537 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3538 and use WORDS_BIG_ENDIAN instead; see the documentation
3539 of SUBREG in rtl.texi. */
3540 for (i
= 0; i
< max_bitsize
/ 32; i
++)
3542 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
3545 if (WORDS_BIG_ENDIAN
)
3546 ibase
= elem_bitsize
- 1 - i
;
3549 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
3552 real_from_target (&r
, tmp
, outer_submode
);
3553 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
3561 if (VECTOR_MODE_P (outermode
))
3562 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
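
/* Illustration only (not part of the original sources): a stand-alone sketch
   of the unpack/repack idea used by simplify_immed_subreg above, for a plain
   integer and 8-bit groups on a little-endian target.  */
#if 0
static unsigned HOST_WIDE_INT
unpack_repack_example (void)
{
  unsigned HOST_WIDE_INT x = 0x01020304;
  unsigned char value[sizeof x];
  unsigned HOST_WIDE_INT lowpart;
  unsigned int i;

  /* Unpack X into little-endian bytes: value[0] == 0x04, value[1] == 0x03.  */
  for (i = 0; i < sizeof x; i++)
    value[i] = (x >> (i * 8)) & 0xff;

  /* Repack the two low bytes; this is the HImode lowpart of the SImode
     constant, i.e. (const_int 0x0304).  */
  lowpart = value[0] | ((unsigned HOST_WIDE_INT) value[1] << 8);
  return lowpart;
}
#endif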
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
3570 simplify_subreg (enum machine_mode outermode
, rtx op
,
3571 enum machine_mode innermode
, unsigned int byte
)
3573 /* Little bit of sanity checking. */
3574 if (innermode
== VOIDmode
|| outermode
== VOIDmode
3575 || innermode
== BLKmode
|| outermode
== BLKmode
)
3578 if (GET_MODE (op
) != innermode
3579 && GET_MODE (op
) != VOIDmode
)
3582 if (byte
% GET_MODE_SIZE (outermode
)
3583 || byte
>= GET_MODE_SIZE (innermode
))
3586 if (outermode
== innermode
&& !byte
)
3589 if (GET_CODE (op
) == CONST_INT
3590 || GET_CODE (op
) == CONST_DOUBLE
3591 || GET_CODE (op
) == CONST_VECTOR
)
3592 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
3596 if (GET_CODE (op
) == SUBREG
)
3598 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3599 int final_offset
= byte
+ SUBREG_BYTE (op
);
3602 if (outermode
== innermostmode
3603 && byte
== 0 && SUBREG_BYTE (op
) == 0)
3604 return SUBREG_REG (op
);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
3610 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
3612 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
3613 if (WORDS_BIG_ENDIAN
)
3614 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3615 if (BYTES_BIG_ENDIAN
)
3616 final_offset
+= difference
% UNITS_PER_WORD
;
3618 if (SUBREG_BYTE (op
) == 0
3619 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
3621 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
3622 if (WORDS_BIG_ENDIAN
)
3623 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3624 if (BYTES_BIG_ENDIAN
)
3625 final_offset
+= difference
% UNITS_PER_WORD
;
3628 /* See whether resulting subreg will be paradoxical. */
3629 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
3631 /* In nonparadoxical subregs we can't handle negative offsets. */
3632 if (final_offset
< 0)
3634 /* Bail out in case resulting subreg would be incorrect. */
3635 if (final_offset
% GET_MODE_SIZE (outermode
)
3636 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
3642 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
3646 if (WORDS_BIG_ENDIAN
)
3647 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3648 if (BYTES_BIG_ENDIAN
)
3649 offset
+= difference
% UNITS_PER_WORD
;
3650 if (offset
== final_offset
)
3656 /* Recurse for further possible simplifications. */
3657 newx
= simplify_subreg (outermode
, SUBREG_REG (op
),
3658 GET_MODE (SUBREG_REG (op
)),
3662 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
3665 /* SUBREG of a hard register => just change the register number
3666 and/or mode. If the hard register is not valid in that mode,
3667 suppress this simplification. If the hard register is the stack,
3668 frame, or argument pointer, leave this as a SUBREG. */
3671 && REGNO (op
) < FIRST_PSEUDO_REGISTER
3672 #ifdef CANNOT_CHANGE_MODE_CLASS
3673 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op
), innermode
, outermode
)
3674 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
3675 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
)
3677 && ((reload_completed
&& !frame_pointer_needed
)
3678 || (REGNO (op
) != FRAME_POINTER_REGNUM
3679 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3680 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
3683 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3684 && REGNO (op
) != ARG_POINTER_REGNUM
3686 && REGNO (op
) != STACK_POINTER_REGNUM
3687 && subreg_offset_representable_p (REGNO (op
), innermode
,
3690 rtx tem
= gen_rtx_SUBREG (outermode
, op
, byte
);
3691 int final_regno
= subreg_hard_regno (tem
, 0);
3693 /* ??? We do allow it if the current REG is not valid for
3694 its mode. This is a kludge to work around how float/complex
3695 arguments are passed on 32-bit SPARC and should be fixed. */
3696 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
3697 || ! HARD_REGNO_MODE_OK (REGNO (op
), innermode
))
3699 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, byte
);
	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok a partial register anyway.  */
3706 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
3707 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
3712 /* If we have a SUBREG of a register that we are replacing and we are
3713 replacing it with a MEM, make a new MEM and try replacing the
3714 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3715 or if we would be widening it. */
3718 && ! mode_dependent_address_p (XEXP (op
, 0))
3719 /* Allow splitting of volatile memory references in case we don't
3720 have instruction to move the whole thing. */
3721 && (! MEM_VOLATILE_P (op
)
3722 || ! have_insn_for (SET
, innermode
))
3723 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
3724 return adjust_address_nv (op
, outermode
, byte
);
3726 /* Handle complex values represented as CONCAT
3727 of real and imaginary part. */
3728 if (GET_CODE (op
) == CONCAT
)
3730 int is_realpart
= byte
< (unsigned int) GET_MODE_UNIT_SIZE (innermode
);
3731 rtx part
= is_realpart
? XEXP (op
, 0) : XEXP (op
, 1);
3732 unsigned int final_offset
;
3735 final_offset
= byte
% (GET_MODE_UNIT_SIZE (innermode
));
3736 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
3739 /* We can at least simplify it by referring directly to the
3741 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
3744 /* Optimize SUBREG truncations of zero and sign extended values. */
3745 if ((GET_CODE (op
) == ZERO_EXTEND
3746 || GET_CODE (op
) == SIGN_EXTEND
)
3747 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
))
3749 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
3751 /* If we're requesting the lowpart of a zero or sign extension,
3752 there are three possibilities. If the outermode is the same
3753 as the origmode, we can omit both the extension and the subreg.
3754 If the outermode is not larger than the origmode, we can apply
3755 the truncation without the extension. Finally, if the outermode
3756 is larger than the origmode, but both are integer modes, we
3757 can just extend to the appropriate mode. */
3760 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
3761 if (outermode
== origmode
)
3762 return XEXP (op
, 0);
3763 if (GET_MODE_BITSIZE (outermode
) <= GET_MODE_BITSIZE (origmode
))
3764 return simplify_gen_subreg (outermode
, XEXP (op
, 0), origmode
,
3765 subreg_lowpart_offset (outermode
,
3767 if (SCALAR_INT_MODE_P (outermode
))
3768 return simplify_gen_unary (GET_CODE (op
), outermode
,
3769 XEXP (op
, 0), origmode
);
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
3774 if (GET_CODE (op
) == ZERO_EXTEND
3775 && bitpos
>= GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0))))
3776 return CONST0_RTX (outermode
);
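
/* Illustration only (not from the original sources): examples of the SUBREG
   folds above.  (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
   (subreg:QI (reg:SI R) 0); simplify_subreg (QImode, GEN_INT (0x1234),
   HImode, 0) evaluates the constant and yields (const_int 0x34) on a
   little-endian target; and the lowpart SImode SUBREG of
   (zero_extend:DI (reg:SI R)) reduces to (reg:SI R) itself.  */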
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));

    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))