/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
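
/* Illustrative sketch (added for documentation, not part of the
   original sources): how HWI_SIGN_EXTEND pairs up with the
   (low, high) representation, assuming a 64-bit HOST_WIDE_INT.  The
   function name below is hypothetical and the block is deliberately
   compiled out.  */
#if 0
static void
hwi_sign_extend_example (void)
{
  /* LOW has its top bit set, so viewed as signed it is negative and
     the high word of the pair must be all ones.  */
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);	/* == -1 */

  /* A small positive LOW extends with a zero high word.  */
  HOST_WIDE_INT high2 = HWI_SIGN_EXTEND ((unsigned HOST_WIDE_INT) 5); /* == 0 */
}
#endif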
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
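
/* Worked example (added for illustration, not in the original): for
   32-bit SImode on a host with a 64-bit HOST_WIDE_INT, the CONST_INT
   path is taken with WIDTH == 32, so mode_signbit_p returns true
   exactly when VAL == (unsigned HOST_WIDE_INT) 1 << 31, i.e. for the
   constant 0x80000000.  */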
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
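
/* Usage sketch (illustrative only): callers need not pre-order
   commutative operands or fold constants themselves.  For instance,
   simplify_gen_binary (PLUS, SImode, const1_rtx, some_reg) first
   swaps the operands so the constant comes second, while
   simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3)) folds
   to (const_int 5) via simplify_binary_operation.  */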
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
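
/* Example (illustrative): if X is (mem (symbol_ref LC0)) where LC0 is
   a constant-pool entry holding the SFmode value 1.0, this returns
   the CONST_DOUBLE for 1.0 rather than the MEM, letting the folders
   below see through the pool reference.  */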
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
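
/* Example (illustrative): replacing (reg R) with (const_int 4) in
   (plus (reg R) (const_int -4)) rebuilds the PLUS via
   simplify_gen_binary, which folds the result to (const_int 0); the
   substitution and the simplification happen in one recursive pass.  */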
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (trueop)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (trueop)));
	}
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
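
/* Worked example (illustration only): folding (clz:HI (const_int 8)).
   ARG0 is masked to the 16-bit mode mask, floor_log2 (8) is 3, so
   VAL = 16 - 3 - 1 = 12 and GEN_INT (12) is returned; the
   CLZ_DEFINED_VALUE_AT_ZERO hook only matters when ARG0 == 0.  */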
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (trueop));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	  }
	  break;
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }
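
/* Worked example (illustration only): (fix:SI (const_double 3.0e9))
   with 32-bit SImode.  3.0e9 exceeds the signed upper bound
   0x7fffffff computed above, so the fold saturates to
   (const_int 0x7fffffff), matching the middle-end's constant-folding
   semantics instead of leaving the overflow unspecified.  */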
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (COMPARISON_P (op)
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == PLUS
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && mode_signbit_p (mode, XEXP (op, 1))
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && COMPARISON_P (op)
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);
	  break;
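
	  /* Example of the rule above (added for illustration): in
	     SImode with STORE_FLAG_VALUE == -1,
	     (not (ashiftrt X (const_int 31))) becomes
	     (ge X (const_int 0)), since the arithmetic shift
	     replicates the sign bit across the whole word.  */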
	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (LSHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));

	  /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == LSHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (ASHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));
	  break;
	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;
	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
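
/* Example (added for illustration): for
   (plus (plus X (const_int 1)) (const_int 2)), OP0 is the inner PLUS
   and OP1 a constant, so the "(x op c) op y" rule does not fire;
   instead the attempt to fold "(a op b) op c" as "a op (b op c)"
   combines the two constants and yields (plus X (const_int 3)).  */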
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (trueop0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (trueop1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = trueop0;
	  RTVEC_ELT (v, 1) = trueop1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
	  unsigned i;

	  gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set.  */
	  if (flag_rounding_math
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_PLUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }

	  /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == XOR
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (plus_minus_operand_p (op0)
		  || plus_minus_operand_p (op1))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
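
	  /* Worked example of the coefficient rule above (added for
	     illustration): (plus (mult X (const_int 3)) X) gives
	     COEFF0 == 3 from the MULT and COEFF1 == 1 from the bare X;
	     since both sides reduce to X, the sum becomes
	     (mult X (const_int 4)), kept only if rtx_cost says it is
	     no more expensive than the original PLUS.  */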
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (REG_P (xop00) && REG_P (xop10)
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     something more expensive than we had before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  rtx orig = gen_rtx_MINUS (mode, op0, op1);
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
			 ? tem : 0;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (plus_minus_operand_p (op0)
		  || plus_minus_operand_p (op1))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1))
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
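
	  /* Example (added for illustration): (mult:SI X (const_int 8))
	     has exact_log2 (8) == 3, so it canonicalizes to
	     (ashift:SI X (const_int 3)); x * 2.0 in a float mode
	     instead becomes (plus X X), which is exact in IEEE
	     arithmetic.  */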
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Canonicalize XOR of the most significant bit to PLUS.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (PLUS, mode, op0, op1);
	  /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == PLUS
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, XEXP (op0, 1)))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  /* If we are turning off bits already known off in OP0, we need
	     not do an AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	     there are no non-zero bits of C outside of X's mode.  */
	  if ((GET_CODE (op0) == SIGN_EXTEND
	       || GET_CODE (op0) == ZERO_EXTEND)
	      && GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
		  & INTVAL (trueop1)) == 0)
	    {
	      enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	      tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
					 gen_int_mode (INTVAL (trueop1),
						       imode));
	      return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	    }

	  /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	     ((A & N) + B) & M -> (A + B) & M
	     Similarly if (N & M) == 0,
	     ((A | N) + B) & M -> (A + B) & M
	     and for - instead of + and/or ^ instead of |.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ~INTVAL (trueop1)
	      && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	    {
	      rtx pmop[2];
	      int which;

	      pmop[0] = XEXP (op0, 0);
	      pmop[1] = XEXP (op0, 1);

	      for (which = 0; which < 2; which++)
		{
		  tem = pmop[which];
		  switch (GET_CODE (tem))
		    {
		    case AND:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
			     == INTVAL (trueop1))
			pmop[which] = XEXP (tem, 0);
		      break;
		    case IOR:
		    case XOR:
		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
			pmop[which] = XEXP (tem, 0);
		      break;
		    default:
		      break;
		    }
		}

	      if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
		{
		  tem = simplify_gen_binary (GET_CODE (op0), mode,
					     pmop[0], pmop[1]);
		  return simplify_gen_binary (code, mode, tem, op1);
		}
	    }
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
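
	  /* Worked instance of the M/N rule above (added for
	     illustration): with M == 0xff and N == 0xff00,
	     ((A | N) + B) & M drops the IOR because (N & M) == 0 and
	     the carry out of the low byte cannot be affected, leaving
	     (A + B) & M.  */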
	case UDIV:
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x/1 is x.  */
	  if (trueop1 == const1_rtx)
	    {
	      /* Handle narrowing UDIV.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      return op0;
	    }
	  /* Convert divide by power of two into shift.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
	  break;
	case DIV:
	  /* Handle floating point and integers separately.  */
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      /* Maybe change 0.0 / x to 0.0.  This transformation isn't
		 safe for modes with NaNs, since 0.0 / 0.0 will then be
		 NaN rather than 0.0.  Nor is it safe for modes with signed
		 zeros, since dividing 0 by a negative number gives -0.0  */
	      if (trueop0 == CONST0_RTX (mode)
		  && !HONOR_NANS (mode)
		  && !HONOR_SIGNED_ZEROS (mode)
		  && ! side_effects_p (op1))
		return op0;
	      /* x/1.0 is x.  */
	      if (trueop1 == CONST1_RTX (mode)
		  && !HONOR_SNANS (mode))
		return op0;

	      if (GET_CODE (trueop1) == CONST_DOUBLE
		  && trueop1 != CONST0_RTX (mode))
		{
		  REAL_VALUE_TYPE d;
		  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

		  /* x/-1.0 is -x.  */
		  if (REAL_VALUES_EQUAL (d, dconstm1)
		      && !HONOR_SNANS (mode))
		    return simplify_gen_unary (NEG, mode, op0, mode);

		  /* Change FP division by a constant into multiplication.
		     Only do this with -funsafe-math-optimizations.  */
		  if (flag_unsafe_math_optimizations
		      && !REAL_VALUES_EQUAL (d, dconst0))
		    {
		      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		      return simplify_gen_binary (MULT, mode, op0, tem);
		    }
		}
	    }
	  else
	    {
	      /* 0/x is 0 (or x&0 if x has side-effects).  */
	      if (trueop0 == const0_rtx)
		return side_effects_p (op1)
		       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		       : const0_rtx;
	      /* x/1 is x.  */
	      if (trueop1 == const1_rtx)
		{
		  /* Handle narrowing DIV.  */
		  rtx x = gen_lowpart_common (mode, op0);
		  if (x)
		    return x;
		  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		    return gen_lowpart_SUBREG (mode, op0);
		  return op0;
		}
	      /* x/-1 is -x.  */
	      if (trueop1 == constm1_rtx)
		{
		  rtx x = gen_lowpart_common (mode, op0);
		  if (!x)
		    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
			? gen_lowpart_SUBREG (mode, op0) : op0;
		  return simplify_gen_unary (NEG, mode, x, mode);
		}
	    }
	  break;
	case UMOD:
	  /* 0%x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x%1 is 0 (or x&0 if x has side-effects).  */
	  if (trueop1 == const1_rtx)
	    return side_effects_p (op0)
		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
		   : const0_rtx;
	  /* Implement modulus by power of two as AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));
	  break;
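
	  /* Example (added for illustration): (umod:SI X (const_int 8))
	     becomes (and:SI X (const_int 7)), since an unsigned modulus
	     by a power of two only keeps the low bits.  */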
	case MOD:
	  /* 0%x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
	  if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
	    return side_effects_p (op0)
		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
		   : const0_rtx;
	  break;
	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;
	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);
	      gcc_assert (XVECLEN (trueop1, 0) == 1);
	      gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
							  (trueop1, 0, 0)));
	    }
	  else
	    {
	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (GET_MODE (trueop0)));
	      gcc_assert (GET_CODE (trueop1) == PARALLEL);

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      gcc_assert (GET_CODE (x) == CONST_INT);
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
							   INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  break;
	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    gcc_assert (VECTOR_MODE_P (mode));
	    gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
			== GET_MODE_SIZE (mode));

	    if (VECTOR_MODE_P (op0_mode))
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (op0_mode));
	    else
	      gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	    if (VECTOR_MODE_P (op1_mode))
	      gcc_assert (GET_MODE_INNER (mode)
			  == GET_MODE_INNER (op1_mode));
	    else
	      gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	  }
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return 0;
    }
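
  /* Illustrative example (not part of the original source):
       (vec_concat:V2SI (const_int 1) (const_int 2))
     folds to (const_vector:V2SI [1 2]); the values are hypothetical.  */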
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
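
  /* Worked example (not part of the original source): if arg0 arrives
     as -128 for a QImode operation (width 8), masking with 0xff leaves
     arg0 == 0x80 (the zero-extended view), while the sign-bit test
     re-extends arg0s back to -128, so the signed and unsigned cases
     below each see the appropriate view of the same bits.  */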
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
    case ASHIFT:
    case ASHIFTRT:
      /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
	 value is in range.  We can't return any old value for out-of-range
	 arguments because either the middle-end (via shift_truncation_mask)
	 or the back-end might be relying on target-specific knowledge.
	 Nor can we rely on shift_truncation_mask, since the shift might
	 not be part of an ashlM3, lshrM3 or ashrM3 instruction.  */
      if (SHIFT_COUNT_TRUNCATED)
	arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
      else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	return 0;

      val = (code == ASHIFT
	     ? ((unsigned HOST_WIDE_INT) arg0) << arg1
	     : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

      /* Sign-extend the result for arithmetic right shifts.  */
      if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (width - arg1);
      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
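
/* Worked example (not part of the original source): folding
     (plus:QI (const_int 126) (const_int 4))
   computes val = 130, and trunc_int_for_mode reduces that to the QImode
   value (const_int -126); the constant addition wraps modulo 256.  */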
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
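
/* Illustrative example (not part of the original source): given
     (plus (plus (reg A) (const_int 2)) (minus (reg B) (reg A)))
   the expansion loop below flattens the tree into the operand list
     {+A, +2, +B, -A},
   the pairwise pass cancels A against -A, and the result is rebuilt as
     (plus (reg B) (const_int 2)).
   A and B are hypothetical pseudo registers.  */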
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = 0;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the two operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
	{
	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */
	  if (code == NE)
	    {
	      if (GET_MODE (op0) == cmp_mode)
		return simplify_rtx (op0);
	      else
		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	  else if (code == EQ)
	    {
	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	      if (new_code != UNKNOWN)
		return simplify_gen_relational (new_code, mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  return NULL_RTX;
}
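
/* Illustrative example (not part of the original source): the rule above
   rewrites
     (eq:SI (plus:SI (reg:SI 60) (const_int 3)) (const_int 5))
   into
     (eq:SI (reg:SI 60) (const_int 2)),
   where pseudo register 60 is hypothetical.  */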
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */
rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));
  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTU:
	  case GTU:
	  case LEU:
	  case GEU:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      /* Optimize comparisons with upper and lower bounds.  */
      if (SCALAR_INT_MODE_P (mode)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  rtx mmin, mmax;
	  int sign;

	  if (code == GEU || code == LEU || code == GTU || code == LTU)
	    sign = 0;
	  else
	    sign = 1;

	  get_mode_bounds (mode, sign, mode, &mmin, &mmax);

	  tem = NULL_RTX;
	  switch (code)
	    {
	    case GEU:
	    case GE:
	      /* x >= min is always true.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const_true_rtx;
	      break;

	    case LEU:
	    case LE:
	      /* x <= max is always true.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const_true_rtx;
	      break;

	    case GTU:
	    case GT:
	      /* x > max is always false.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const0_rtx;
	      break;

	    case LTU:
	    case LT:
	      /* x < min is always false.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const0_rtx;
	      break;

	    default:
	      break;
	    }
	  if (tem == const0_rtx
	      || tem == const_true_rtx)
	    return tem;
	}
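
      /* Illustrative example (not part of the original source): in SImode,
	 (geu:SI (reg:SI 60) (const_int 0)) is a tautology, since zero is
	 the unsigned lower bound of the mode; pseudo register 60 is
	 hypothetical.  */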
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }
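
  /* Illustrative example (not part of the original source): when
     signaling NaNs need not be honored (e.g. under -ffast-math),
       (lt:SF (abs:SF (reg:SF 60)) (const_double:SF 0.0))
     folds to const0_rtx, since |x| can never be negative; pseudo
     register 60 is hypothetical.  */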
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
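
      /* Worked example (not part of the original source), with
	 little-endian bit numbering (BITS_BIG_ENDIAN == 0):
	   (zero_extract:SI (const_int 165) (const_int 4) (const_int 0))
	 shifts val right by 0 and masks it with 0xf, yielding
	 (const_int 5), the low nibble of 0xa5.  */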
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
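
      /* Illustrative example (not part of the original source): on a
	 target where STORE_FLAG_VALUE is 1,
	   (if_then_else (lt (reg A) (reg B)) (const_int 1) (const_int 0))
	 collapses to (lt (reg A) (reg B)); A and B are hypothetical
	 pseudo registers.  */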
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
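
/* Illustrative example (not part of the original source): on a 32-bit
   little-endian target, (subreg:SI (const_double ...) 0) of a DImode
   constant unpacks the constant into its eight bytes and repacks the
   low four, producing a CONST_INT holding the low 32 bits.  */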
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < max_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
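
  /* Illustrative examples (not part of the original source):
       (subreg:QI (zero_extend:SI (reg:QI 60)) 0)  folds to  (reg:QI 60),
     and, on a little-endian target,
       (subreg:HI (zero_extend:SI (reg:QI 60)) 2)  folds to  (const_int 0),
     since the high bits of the extension are known to be zero; pseudo
     register 60 is hypothetical.  */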
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}