/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
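
/* For illustration: a two-word value is kept as (low, high), and the
   high word can be recomputed from the sign of the low word.  With a
   64-bit HOST_WIDE_INT,

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  yields  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT)  5)  yields  (HOST_WIDE_INT)  0

   so sign-extending the low word -5 produces the pair
   (0xfffffffffffffffb, -1), the double-width representation of -5.  */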
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
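
/* For illustration: in SImode the most negative value is
   (const_int -2147483648).  Its mathematical negation, 2147483648,
   does not fit in 32 signed bits; gen_int_mode truncates it back to
   -2147483648, the correct two's-complement result.  */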
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
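
/* For illustration: in 32-bit SImode the sign bit constant is
   0x80000000, which a 64-bit host stores sign-extended as
   (const_int -2147483648).  mode_signbit_p masks the value down to the
   mode's width before comparing, so it returns true for that constant
   and false for every other SImode constant.  */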
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
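
/* For illustration, from a caller's point of view:
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to X itself,
   while simplify_gen_binary (PLUS, SImode, x, y) with nothing to fold
   returns a freshly built (plus:SI x y).  */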
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
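
/* For illustration: replacing OLD_RTX = (reg:SI 60) with (const_int 4)
   in (plus:SI (reg:SI 60) (const_int -4)) rebuilds the PLUS through
   simplify_gen_binary, which folds the two constants to
   (const_int 0).  */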
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (trueop)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (trueop)));
	}
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
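
  /* For illustration of the folding above: in SImode,
     (clz (const_int 1)) folds to (const_int 31), since 32 - 0 - 1 = 31,
     and (ffs (const_int 12)) folds to (const_int 3), the 1-based
     position of the lowest set bit (12 & -12 is 4, exact_log2 gives 2,
     plus 1).  */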
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1, hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (trueop));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (COMPARISON_P (op)
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == PLUS
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && mode_signbit_p (mode, XEXP (op, 1))
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && COMPARISON_P (op)
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);

	  break;
953 if (GET_CODE (op
) == NEG
)
956 /* (neg (plus X 1)) can become (not X). */
957 if (GET_CODE (op
) == PLUS
958 && XEXP (op
, 1) == const1_rtx
)
959 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
961 /* Similarly, (neg (not X)) is (plus X 1). */
962 if (GET_CODE (op
) == NOT
)
963 return plus_constant (XEXP (op
, 0), 1);
965 /* (neg (minus X Y)) can become (minus Y X). This transformation
966 isn't safe for modes with signed zeros, since if X and Y are
967 both +0, (minus Y X) is the same as (minus X Y). If the
968 rounding mode is towards +infinity (or -infinity) then the two
969 expressions will be rounded differently. */
970 if (GET_CODE (op
) == MINUS
971 && !HONOR_SIGNED_ZEROS (mode
)
972 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
973 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1),
976 if (GET_CODE (op
) == PLUS
977 && !HONOR_SIGNED_ZEROS (mode
)
978 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
980 /* (neg (plus A C)) is simplified to (minus -C A). */
981 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
982 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
984 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1),
987 return simplify_gen_binary (MINUS
, mode
, temp
,
991 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
992 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
993 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
996 /* (neg (mult A B)) becomes (mult (neg A) B).
997 This works even for floating-point values. */
998 if (GET_CODE (op
) == MULT
999 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1001 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1002 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
1005 /* NEG commutes with ASHIFT since it is multiplication. Only do
1006 this if we can then eliminate the NEG (e.g., if the operand
1008 if (GET_CODE (op
) == ASHIFT
)
1010 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0),
1013 return simplify_gen_binary (ASHIFT
, mode
, temp
,
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op
) == ASHIFTRT
1020 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1021 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
1022 return simplify_gen_binary (LSHIFTRT
, mode
,
1023 XEXP (op
, 0), XEXP (op
, 1));
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op
) == LSHIFTRT
1028 && GET_CODE (XEXP (op
, 1)) == CONST_INT
1029 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
1030 return simplify_gen_binary (ASHIFTRT
, mode
,
1031 XEXP (op
, 0), XEXP (op
, 1));
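
	  /* For illustration of the two shift rewrites above: in SImode,
	     (neg (ashiftrt X 31)) becomes (lshiftrt X 31) and
	     (neg (lshiftrt X 31)) becomes (ashiftrt X 31).  One side is
	     always 0 or -1 and the other 0 or 1, so negation merely
	     switches between the two encodings of the sign bit.  */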
	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;
	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
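
/* For illustration: given op0 = (plus (reg X) (const_int 1)) and
   op1 = (const_int 2), the "(a op b) op c" -> "a op (b op c)" step
   folds the two constants and yields (plus (reg X) (const_int 3)).  */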
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (trueop0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (trueop1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = trueop0;
	  RTVEC_ELT (v, 1) = trueop1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
	  unsigned i;

	  gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if the
	     result may dependent upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_PLUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }

	  /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == XOR
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (plus_minus_operand_p (op0)
		  || plus_minus_operand_p (op1))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
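
	  /* For illustration of the distribution above: X*3 + X has
	     coeff0 = 3 and coeff1 = 1, so it becomes X*4, which
	     simplify_gen_binary in turn rewrites as (ashift X 2); the
	     rtx_cost check keeps the cheaper of the two forms.  */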
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (REG_P (xop00) && REG_P (xop10)
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_MINUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (plus_minus_operand_p (op0)
		  || plus_minus_operand_p (op1))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1))
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
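
	  /* For illustration of the shift conversion above:
	     (mult X (const_int 8)) becomes (ashift X (const_int 3)),
	     since exact_log2 (8) == 3.  */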
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Canonicalize XOR of the most significant bit to PLUS.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (PLUS, mode, op0, op1);
	  /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == PLUS
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, XEXP (op0, 1)))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  /* If we are turning off bits already known off in OP0, we need
	     not do an AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	     there are no non-zero bits of C outside of X's mode.  */
	  if ((GET_CODE (op0) == SIGN_EXTEND
	       || GET_CODE (op0) == ZERO_EXTEND)
	      && GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
		  & INTVAL (trueop1)) == 0)
	    {
	      enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	      tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
					 gen_int_mode (INTVAL (trueop1),
						       imode));
	      return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	    }

	  /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	     ((A & N) + B) & M -> (A + B) & M
	     Similarly if (N & M) == 0,
	     ((A | N) + B) & M -> (A + B) & M
	     and for - instead of + and/or ^ instead of |.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ~INTVAL (trueop1)
	      && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	    {
	      rtx pmop[2];
	      int which;

	      pmop[0] = XEXP (op0, 0);
	      pmop[1] = XEXP (op0, 1);

	      for (which = 0; which < 2; which++)
		{
		  tem = pmop[which];
		  switch (GET_CODE (tem))
		    {
		    case AND:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
			     == INTVAL (trueop1))
			pmop[which] = XEXP (tem, 0);
		      break;
		    case IOR:
		    case XOR:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
			pmop[which] = XEXP (tem, 0);
		      break;
		    default:
		      break;
		    }
		}

	      if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
		{
		  tem = simplify_gen_binary (GET_CODE (op0), mode,
					     pmop[0], pmop[1]);
		  return simplify_gen_binary (code, mode, tem, op1);
		}
	    }
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UDIV:
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x/1 is x.  */
	  if (trueop1 == const1_rtx)
	    {
	      /* Handle narrowing UDIV.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      return op0;
	    }
	  /* Convert divide by power of two into shift.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
	  break;
	case DIV:
	  /* Handle floating point and integers separately.  */
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      /* Maybe change 0.0 / x to 0.0.  This transformation isn't
		 safe for modes with NaNs, since 0.0 / 0.0 will then be
		 NaN rather than 0.0.  Nor is it safe for modes with signed
		 zeros, since dividing 0 by a negative number gives -0.0  */
	      if (trueop0 == CONST0_RTX (mode)
		  && !HONOR_NANS (mode)
		  && !HONOR_SIGNED_ZEROS (mode)
		  && ! side_effects_p (op1))
		return op0;
	      /* x/1.0 is x.  */
	      if (trueop1 == CONST1_RTX (mode)
		  && !HONOR_SNANS (mode))
		return op0;

	      if (GET_CODE (trueop1) == CONST_DOUBLE
		  && trueop1 != CONST0_RTX (mode))
		{
		  REAL_VALUE_TYPE d;
		  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

		  /* x/-1.0 is -x.  */
		  if (REAL_VALUES_EQUAL (d, dconstm1)
		      && !HONOR_SNANS (mode))
		    return simplify_gen_unary (NEG, mode, op0, mode);

		  /* Change FP division by a constant into multiplication.
		     Only do this with -funsafe-math-optimizations.  */
		  if (flag_unsafe_math_optimizations
		      && !REAL_VALUES_EQUAL (d, dconst0))
		    {
		      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		      return simplify_gen_binary (MULT, mode, op0, tem);
		    }
		}
	    }
	  else
	    {
	      /* 0/x is 0 (or x&0 if x has side-effects).  */
	      if (trueop0 == const0_rtx)
		return side_effects_p (op1)
		       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		       : const0_rtx;
	      /* x/1 is x.  */
	      if (trueop1 == const1_rtx)
		{
		  /* Handle narrowing DIV.  */
		  rtx x = gen_lowpart_common (mode, op0);
		  if (x)
		    return x;
		  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		    return gen_lowpart_SUBREG (mode, op0);
		  return op0;
		}
	      /* x/-1 is -x.  */
	      if (trueop1 == constm1_rtx)
		{
		  rtx x = gen_lowpart_common (mode, op0);
		  if (!x)
		    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
			? gen_lowpart_SUBREG (mode, op0) : op0;
		  return simplify_gen_unary (NEG, mode, x, mode);
		}
	    }
	  break;
	case UMOD:
	  /* 0%x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x%1 is 0 (or x&0 if x has side-effects).  */
	  if (trueop1 == const1_rtx)
	    return side_effects_p (op0)
		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
		   : const0_rtx;
	  /* Implement modulus by power of two as AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));
	  break;
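
	  /* For illustration of the AND rewrite above:
	     (umod X (const_int 8)) becomes (and X (const_int 7)),
	     since 8 is a power of two.  */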
	case MOD:
	  /* 0%x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
	  if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
	    return side_effects_p (op0)
		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
		   : const0_rtx;
	  break;
	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;
	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);
	      gcc_assert (XVECLEN (trueop1, 0) == 1);
	      gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
							  (trueop1, 0, 0)));
	    }
	  else
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      gcc_assert (GET_CODE (x) == CONST_INT);
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
							   INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;
        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            gcc_assert (VECTOR_MODE_P (mode));
            gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                        == GET_MODE_SIZE (mode));

            if (VECTOR_MODE_P (op0_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op0_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op0_mode);

            if (VECTOR_MODE_P (op1_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op1_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op1_mode);

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          break;
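          /* Worked example (added for illustration, not from the original
             sources): concatenating two scalar constants builds a constant
             vector, e.g.

                 (vec_concat:V2SI (const_int 1) (const_int 2))

             folds to (const_vector:V2SI [1 2]).  */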
        default:
          break;
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
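  /* Worked example (added for illustration, not from the original
     sources): for QImode (width == 8) with trueop0 == (const_int 0xff),
     the code above leaves arg0 == 0xff (the zero-extended form) and
     sign-extends arg0s to -1, so the signed and unsigned operators below
     each see the interpretation they expect of the same bit pattern.  */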
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;
    case LSHIFTRT:
    case ASHIFT:
    case ASHIFTRT:
      /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
         value is in range.  We can't return any old value for out-of-range
         arguments because either the middle-end (via shift_truncation_mask)
         or the back-end might be relying on target-specific knowledge.
         Nor can we rely on shift_truncation_mask, since the shift might
         not be part of an ashlM3, lshrM3 or ashrM3 instruction.  */
      if (SHIFT_COUNT_TRUNCATED)
        arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
      else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
        return 0;

      val = (code == ASHIFT
             ? ((unsigned HOST_WIDE_INT) arg0) << arg1
             : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

      /* Sign-extend the result for arithmetic right shifts.  */
      if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (width - arg1);
      break;
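      /* Worked example (added for illustration, not from the original
         sources): in QImode, (ashiftrt 0x80 1) first computes the logical
         shift 0x40 and then ORs in the sign bits, giving 0xc0; truncated
         for the mode this is -64, the arithmetic right shift of -128.  */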
    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
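      /* Worked example (added for illustration, not from the original
         sources): in QImode, (rotate 0x81 1) computes
         (0x81 << 1) | (0x81 >> 7) == 0x102 | 0x01, which truncates to
         0x03 in eight bits.  */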
    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
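/* Worked example (added for illustration, not from the original sources):
   given (minus (plus (reg a) (const_int 4)) (plus (reg a) (const_int 3))),
   the expansion loop below flattens both operands into the signed operand
   list {a, 4, -a, -3}; the pairwise pass then cancels a against -a and
   folds 4 - 3, leaving (const_int 1).  */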
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */
  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem, ops[i].neg = lneg;
                    ops[j].op = NULL_RTX, ops[j].neg = 0;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */

  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;
  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1 - ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */

static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
        {
          /* If op0 is a comparison, extract the comparison arguments
             from it.  */
          if (code == NE)
            {
              if (GET_MODE (op0) == cmp_mode)
                return simplify_rtx (op0);
              else
                return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                                XEXP (op0, 0), XEXP (op0, 1));
            }
          else if (code == EQ)
            {
              enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
              if (new_code != UNKNOWN)
                return simplify_gen_relational (new_code, mode, VOIDmode,
                                                XEXP (op0, 0), XEXP (op0, 1));
            }
        }
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
                               cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  return NULL_RTX;
}
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
            && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
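  /* Worked example (added for illustration, not from the original
     sources): the unsigned codes are excluded above because the
     subtraction can wrap.  Simplifying (gtu (plus x 1) x) via
     (minus (plus x 1) x) == 1 would yield "always true", yet for
     x == 0xffffffff the sum wraps to 0 and the GTU test is false.  */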
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      /* Optimize comparisons with upper and lower bounds.  */
      if (SCALAR_INT_MODE_P (mode)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
        {
          rtx mmin, mmax;
          int sign;

          if (code == GEU
              || code == LEU
              || code == GTU
              || code == LTU)
            sign = 0;
          else
            sign = 1;

          get_mode_bounds (mode, sign, mode, &mmin, &mmax);

          tem = NULL_RTX;
          switch (code)
            {
            case GEU:
            case GE:
              /* x >= min is always true.  */
              if (rtx_equal_p (trueop1, mmin))
                tem = const_true_rtx;
              break;

            case LEU:
            case LE:
              /* x <= max is always true.  */
              if (rtx_equal_p (trueop1, mmax))
                tem = const_true_rtx;
              break;

            case GTU:
            case GT:
              /* x > max is always false.  */
              if (rtx_equal_p (trueop1, mmax))
                tem = const0_rtx;
              break;

            case LTU:
            case LT:
              /* x < min is always false.  */
              if (rtx_equal_p (trueop1, mmin))
                tem = const0_rtx;
              break;

            default:
              break;
            }
          if (tem == const0_rtx
              || tem == const_true_rtx)
            return tem;
        }

      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
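/* Usage example (added for illustration, not from the original sources):
   a typical use of the routine above is folding a comparison of two
   literals, e.g.

       simplify_const_relational_operation (GT, SImode,
                                            GEN_INT (5), GEN_INT (3))

   returns const_true_rtx, while swapping the two operands returns
   const0_rtx.  */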
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return gen_int_mode (val, mode);
        }
      break;
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (GET_CODE (temp) == CONST_INT)
                return temp == const0_rtx ? op2 : op1;
              else if (temp)
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
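      /* Worked example (added for illustration, not from the original
         sources): on a target where STORE_FLAG_VALUE is 1, the
         constant-arm check above turns

             (if_then_else (lt (reg x) (reg y)) (const_int 1) (const_int 0))

         into plain (lt (reg x) (reg y)), and the reversed pair of arms
         into the inverse comparison (ge (reg x) (reg y)).  */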
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
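/* Worked example (added for illustration, not from the original sources):
   for a little-endian 32-bit target, (subreg:QI (const_int 0x12345678) 0)
   is evaluated by unpacking the SImode value into the byte array
   {0x78, 0x56, 0x34, 0x12}, selecting byte 0 and repacking, which yields
   (const_int 0x78).  */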
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < max_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else
              elems[elem] = immed_double_const (lo, hi, outer_submode);
          }
          break;

        case MODE_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
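/* Usage example (added for illustration, not from the original sources):
   the constant case near the top of simplify_subreg delegates to
   simplify_immed_subreg, so e.g. on a little-endian target

       simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0)

   folds to (const_int 0x34), the low byte of the HImode value.  */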
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))