/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx
neg_const_int (enum machine_mode
, rtx
);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
,
56 static bool associative_constant_p (rtx
);
57 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
60 /* Negate a CONST_INT rtx, truncating (because a conversion from a
61 maximally negative number can overflow). */
63 neg_const_int (enum machine_mode mode
, rtx i
)
65 return gen_int_mode (- INTVAL (i
), mode
);
69 /* Make a binary operation by properly ordering the operands and
70 seeing if the expression folds. */
73 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
78 /* Put complex operands first and constants second if commutative. */
79 if (GET_RTX_CLASS (code
) == 'c'
80 && swap_commutative_operands_p (op0
, op1
))
81 tem
= op0
, op0
= op1
, op1
= tem
;
83 /* If this simplifies, do it. */
84 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
88 /* Handle addition and subtraction specially. Otherwise, just form
91 if (code
== PLUS
|| code
== MINUS
)
93 tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 1);
98 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
101 /* If X is a MEM referencing the constant pool, return the real value.
102 Otherwise return X. */
104 avoid_constant_pool_reference (rtx x
)
107 enum machine_mode cmode
;
109 switch (GET_CODE (x
))
115 /* Handle float extensions of constant pool references. */
117 c
= avoid_constant_pool_reference (tmp
);
118 if (c
!= tmp
&& GET_CODE (c
) == CONST_DOUBLE
)
122 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
123 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
133 /* Call target hook to avoid the effects of -fpic etc.... */
134 addr
= (*targetm
.delegitimize_address
) (addr
);
136 if (GET_CODE (addr
) == LO_SUM
)
137 addr
= XEXP (addr
, 1);
139 if (GET_CODE (addr
) != SYMBOL_REF
140 || ! CONSTANT_POOL_ADDRESS_P (addr
))
143 c
= get_pool_constant (addr
);
144 cmode
= get_pool_mode (addr
);
146 /* If we're accessing the constant in a different mode than it was
147 originally stored, attempt to fix that up via subreg simplifications.
148 If that fails we have no choice but to return the original memory. */
149 if (cmode
!= GET_MODE (x
))
151 c
= simplify_subreg (GET_MODE (x
), c
, cmode
, 0);
158 /* Make a unary operation by first seeing if it folds and otherwise making
159 the specified operation. */
162 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
163 enum machine_mode op_mode
)
167 /* If this simplifies, use it. */
168 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
171 return gen_rtx_fmt_e (code
, mode
, op
);
174 /* Likewise for ternary operations. */
177 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
178 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
182 /* If this simplifies, use it. */
183 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
187 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
190 /* Likewise, for relational operations.
191 CMP_MODE specifies mode comparison is done in.
195 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
196 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
200 if (cmp_mode
== VOIDmode
)
201 cmp_mode
= GET_MODE (op0
);
202 if (cmp_mode
== VOIDmode
)
203 cmp_mode
= GET_MODE (op1
);
205 if (cmp_mode
!= VOIDmode
)
207 tem
= simplify_relational_operation (code
, cmp_mode
, op0
, op1
);
211 #ifdef FLOAT_STORE_FLAG_VALUE
212 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
215 if (tem
== const0_rtx
)
216 return CONST0_RTX (mode
);
217 if (tem
!= const_true_rtx
)
219 val
= FLOAT_STORE_FLAG_VALUE (mode
);
220 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
227 /* For the following tests, ensure const0_rtx is op1. */
228 if (swap_commutative_operands_p (op0
, op1
)
229 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
230 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
232 /* If op0 is a compare, extract the comparison arguments from it. */
233 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
234 return simplify_gen_relational (code
, mode
, VOIDmode
,
235 XEXP (op0
, 0), XEXP (op0
, 1));
237 /* If op0 is a comparison, extract the comparison arguments form it. */
238 if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && op1
== const0_rtx
)
242 if (GET_MODE (op0
) == mode
)
244 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
245 XEXP (op0
, 0), XEXP (op0
, 1));
249 enum rtx_code
new = reversed_comparison_code (op0
, NULL_RTX
);
251 return simplify_gen_relational (new, mode
, VOIDmode
,
252 XEXP (op0
, 0), XEXP (op0
, 1));
256 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
259 /* Replace all occurrences of OLD in X with NEW and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
263 simplify_replace_rtx (rtx x
, rtx old
, rtx
new)
265 enum rtx_code code
= GET_CODE (x
);
266 enum machine_mode mode
= GET_MODE (x
);
267 enum machine_mode op_mode
;
270 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
271 to build a new expression substituting recursively. If we can't do
272 anything, return our input. */
277 switch (GET_RTX_CLASS (code
))
281 op_mode
= GET_MODE (op0
);
282 op0
= simplify_replace_rtx (op0
, old
, new);
283 if (op0
== XEXP (x
, 0))
285 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
289 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
290 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
291 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
293 return simplify_gen_binary (code
, mode
, op0
, op1
);
298 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
299 op0
= simplify_replace_rtx (op0
, old
, new);
300 op1
= simplify_replace_rtx (op1
, old
, new);
301 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
303 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
308 op_mode
= GET_MODE (op0
);
309 op0
= simplify_replace_rtx (op0
, old
, new);
310 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
311 op2
= simplify_replace_rtx (XEXP (x
, 2), old
, new);
312 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
314 if (op_mode
== VOIDmode
)
315 op_mode
= GET_MODE (op0
);
316 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
319 /* The only case we try to handle is a SUBREG. */
322 op0
= simplify_replace_rtx (SUBREG_REG (x
), old
, new);
323 if (op0
== SUBREG_REG (x
))
325 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
326 GET_MODE (SUBREG_REG (x
)),
328 return op0
? op0
: x
;
335 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
336 if (op0
== XEXP (x
, 0))
338 return replace_equiv_address_nv (x
, op0
);
340 else if (code
== LO_SUM
)
342 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
343 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
349 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
351 return gen_rtx_LO_SUM (mode
, op0
, op1
);
353 else if (code
== REG
)
355 if (REG_P (old
) && REGNO (x
) == REGNO (old
))
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
370 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
371 rtx op
, enum machine_mode op_mode
)
373 unsigned int width
= GET_MODE_BITSIZE (mode
);
374 rtx trueop
= avoid_constant_pool_reference (op
);
376 if (code
== VEC_DUPLICATE
)
378 if (!VECTOR_MODE_P (mode
))
380 if (GET_MODE (trueop
) != VOIDmode
381 && !VECTOR_MODE_P (GET_MODE (trueop
))
382 && GET_MODE_INNER (mode
) != GET_MODE (trueop
))
384 if (GET_MODE (trueop
) != VOIDmode
385 && VECTOR_MODE_P (GET_MODE (trueop
))
386 && GET_MODE_INNER (mode
) != GET_MODE_INNER (GET_MODE (trueop
)))
388 if (GET_CODE (trueop
) == CONST_INT
|| GET_CODE (trueop
) == CONST_DOUBLE
389 || GET_CODE (trueop
) == CONST_VECTOR
)
391 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
392 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
393 rtvec v
= rtvec_alloc (n_elts
);
396 if (GET_CODE (trueop
) != CONST_VECTOR
)
397 for (i
= 0; i
< n_elts
; i
++)
398 RTVEC_ELT (v
, i
) = trueop
;
401 enum machine_mode inmode
= GET_MODE (trueop
);
402 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
403 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
405 if (in_n_elts
>= n_elts
|| n_elts
% in_n_elts
)
407 for (i
= 0; i
< n_elts
; i
++)
408 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop
, i
% in_n_elts
);
410 return gen_rtx_CONST_VECTOR (mode
, v
);
413 else if (GET_CODE (op
) == CONST
)
414 return simplify_unary_operation (code
, mode
, XEXP (op
, 0), op_mode
);
416 if (VECTOR_MODE_P (mode
) && GET_CODE (trueop
) == CONST_VECTOR
)
418 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
419 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
420 enum machine_mode opmode
= GET_MODE (trueop
);
421 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
422 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
423 rtvec v
= rtvec_alloc (n_elts
);
426 if (op_n_elts
!= n_elts
)
429 for (i
= 0; i
< n_elts
; i
++)
431 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
432 CONST_VECTOR_ELT (trueop
, i
),
433 GET_MODE_INNER (opmode
));
436 RTVEC_ELT (v
, i
) = x
;
438 return gen_rtx_CONST_VECTOR (mode
, v
);
441 /* The order of these tests is critical so that, for example, we don't
442 check the wrong mode (input vs. output) for a conversion operation,
443 such as FIX. At some point, this should be simplified. */
445 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
446 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
448 HOST_WIDE_INT hv
, lv
;
451 if (GET_CODE (trueop
) == CONST_INT
)
452 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
454 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
456 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
457 d
= real_value_truncate (mode
, d
);
458 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
460 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
461 && (GET_CODE (trueop
) == CONST_DOUBLE
462 || GET_CODE (trueop
) == CONST_INT
))
464 HOST_WIDE_INT hv
, lv
;
467 if (GET_CODE (trueop
) == CONST_INT
)
468 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
470 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
472 if (op_mode
== VOIDmode
)
474 /* We don't know how to interpret negative-looking numbers in
475 this case, so don't try to fold those. */
479 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
482 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
484 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
485 d
= real_value_truncate (mode
, d
);
486 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
489 if (GET_CODE (trueop
) == CONST_INT
490 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
492 HOST_WIDE_INT arg0
= INTVAL (trueop
);
506 val
= (arg0
>= 0 ? arg0
: - arg0
);
510 /* Don't use ffs here. Instead, get low order bit and then its
511 number. If arg0 is zero, this will return 0, as desired. */
512 arg0
&= GET_MODE_MASK (mode
);
513 val
= exact_log2 (arg0
& (- arg0
)) + 1;
517 arg0
&= GET_MODE_MASK (mode
);
518 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
521 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
525 arg0
&= GET_MODE_MASK (mode
);
528 /* Even if the value at zero is undefined, we have to come
529 up with some replacement. Seems good enough. */
530 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
531 val
= GET_MODE_BITSIZE (mode
);
534 val
= exact_log2 (arg0
& -arg0
);
538 arg0
&= GET_MODE_MASK (mode
);
541 val
++, arg0
&= arg0
- 1;
545 arg0
&= GET_MODE_MASK (mode
);
548 val
++, arg0
&= arg0
- 1;
557 /* When zero-extending a CONST_INT, we need to know its
559 if (op_mode
== VOIDmode
)
561 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
563 /* If we were really extending the mode,
564 we would have to distinguish between zero-extension
565 and sign-extension. */
566 if (width
!= GET_MODE_BITSIZE (op_mode
))
570 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
571 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
577 if (op_mode
== VOIDmode
)
579 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
581 /* If we were really extending the mode,
582 we would have to distinguish between zero-extension
583 and sign-extension. */
584 if (width
!= GET_MODE_BITSIZE (op_mode
))
588 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
591 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
593 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
594 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
611 val
= trunc_int_for_mode (val
, mode
);
613 return GEN_INT (val
);
616 /* We can do some operations on integer CONST_DOUBLEs. Also allow
617 for a DImode operation on a CONST_INT. */
618 else if (GET_MODE (trueop
) == VOIDmode
619 && width
<= HOST_BITS_PER_WIDE_INT
* 2
620 && (GET_CODE (trueop
) == CONST_DOUBLE
621 || GET_CODE (trueop
) == CONST_INT
))
623 unsigned HOST_WIDE_INT l1
, lv
;
624 HOST_WIDE_INT h1
, hv
;
626 if (GET_CODE (trueop
) == CONST_DOUBLE
)
627 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
629 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
639 neg_double (l1
, h1
, &lv
, &hv
);
644 neg_double (l1
, h1
, &lv
, &hv
);
656 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
659 lv
= exact_log2 (l1
& -l1
) + 1;
665 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
666 - HOST_BITS_PER_WIDE_INT
;
668 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
669 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
670 lv
= GET_MODE_BITSIZE (mode
);
676 lv
= exact_log2 (l1
& -l1
);
678 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
679 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
680 lv
= GET_MODE_BITSIZE (mode
);
703 /* This is just a change-of-mode, so do nothing. */
708 if (op_mode
== VOIDmode
)
711 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
715 lv
= l1
& GET_MODE_MASK (op_mode
);
719 if (op_mode
== VOIDmode
720 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
724 lv
= l1
& GET_MODE_MASK (op_mode
);
725 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
726 && (lv
& ((HOST_WIDE_INT
) 1
727 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
728 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
730 hv
= HWI_SIGN_EXTEND (lv
);
741 return immed_double_const (lv
, hv
, mode
);
744 else if (GET_CODE (trueop
) == CONST_DOUBLE
745 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
747 REAL_VALUE_TYPE d
, t
;
748 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
753 if (HONOR_SNANS (mode
) && real_isnan (&d
))
755 real_sqrt (&t
, mode
, &d
);
759 d
= REAL_VALUE_ABS (d
);
762 d
= REAL_VALUE_NEGATE (d
);
765 d
= real_value_truncate (mode
, d
);
768 /* All this does is change the mode. */
771 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
777 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
780 else if (GET_CODE (trueop
) == CONST_DOUBLE
781 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
782 && GET_MODE_CLASS (mode
) == MODE_INT
783 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
785 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
786 operators are intentionally left unspecified (to ease implementation
787 by target backends), for consistency, this routine implements the
788 same semantics for constant folding as used by the middle-end. */
790 HOST_WIDE_INT xh
, xl
, th
, tl
;
791 REAL_VALUE_TYPE x
, t
;
792 REAL_VALUE_FROM_CONST_DOUBLE (x
, trueop
);
796 if (REAL_VALUE_ISNAN (x
))
799 /* Test against the signed upper bound. */
800 if (width
> HOST_BITS_PER_WIDE_INT
)
802 th
= ((unsigned HOST_WIDE_INT
) 1
803 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
809 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
811 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
812 if (REAL_VALUES_LESS (t
, x
))
819 /* Test against the signed lower bound. */
820 if (width
> HOST_BITS_PER_WIDE_INT
)
822 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
828 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
830 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
831 if (REAL_VALUES_LESS (x
, t
))
837 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
841 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
844 /* Test against the unsigned upper bound. */
845 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
850 else if (width
>= HOST_BITS_PER_WIDE_INT
)
852 th
= ((unsigned HOST_WIDE_INT
) 1
853 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
859 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
861 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
862 if (REAL_VALUES_LESS (t
, x
))
869 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
875 return immed_double_const (xl
, xh
, mode
);
878 /* This was formerly used only for non-IEEE float.
879 eggert@twinsun.com says it is safe for IEEE also. */
882 enum rtx_code reversed
;
885 /* There are some simplifications we can do even if the operands
890 /* (not (not X)) == X. */
891 if (GET_CODE (op
) == NOT
)
894 /* (not (eq X Y)) == (ne X Y), etc. */
895 if (GET_RTX_CLASS (GET_CODE (op
)) == '<'
896 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
897 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
899 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
900 XEXP (op
, 0), XEXP (op
, 1));
902 /* (not (plus X -1)) can become (neg X). */
903 if (GET_CODE (op
) == PLUS
904 && XEXP (op
, 1) == constm1_rtx
)
905 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
907 /* Similarly, (not (neg X)) is (plus X -1). */
908 if (GET_CODE (op
) == NEG
)
909 return plus_constant (XEXP (op
, 0), -1);
911 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
912 if (GET_CODE (op
) == XOR
913 && GET_CODE (XEXP (op
, 1)) == CONST_INT
914 && (temp
= simplify_unary_operation (NOT
, mode
,
917 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
920 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
921 operands other than 1, but that is not valid. We could do a
922 similar simplification for (not (lshiftrt C X)) where C is
923 just the sign bit, but this doesn't seem common enough to
925 if (GET_CODE (op
) == ASHIFT
926 && XEXP (op
, 0) == const1_rtx
)
928 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
929 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
932 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
933 by reversing the comparison code if valid. */
934 if (STORE_FLAG_VALUE
== -1
935 && GET_RTX_CLASS (GET_CODE (op
)) == '<'
936 && (reversed
= reversed_comparison_code (op
, NULL_RTX
))
938 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
939 XEXP (op
, 0), XEXP (op
, 1));
941 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
942 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
943 so we can perform the above simplification. */
945 if (STORE_FLAG_VALUE
== -1
946 && GET_CODE (op
) == ASHIFTRT
947 && GET_CODE (XEXP (op
, 1)) == CONST_INT
948 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
949 return simplify_gen_relational (GE
, mode
, VOIDmode
,
950 XEXP (op
, 0), const0_rtx
);
955 /* (neg (neg X)) == X. */
956 if (GET_CODE (op
) == NEG
)
959 /* (neg (plus X 1)) can become (not X). */
960 if (GET_CODE (op
) == PLUS
961 && XEXP (op
, 1) == const1_rtx
)
962 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
964 /* Similarly, (neg (not X)) is (plus X 1). */
965 if (GET_CODE (op
) == NOT
)
966 return plus_constant (XEXP (op
, 0), 1);
968 /* (neg (minus X Y)) can become (minus Y X). This transformation
969 isn't safe for modes with signed zeros, since if X and Y are
970 both +0, (minus Y X) is the same as (minus X Y). If the
971 rounding mode is towards +infinity (or -infinity) then the two
972 expressions will be rounded differently. */
973 if (GET_CODE (op
) == MINUS
974 && !HONOR_SIGNED_ZEROS (mode
)
975 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
976 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1),
979 if (GET_CODE (op
) == PLUS
980 && !HONOR_SIGNED_ZEROS (mode
)
981 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
983 /* (neg (plus A C)) is simplified to (minus -C A). */
984 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
985 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
987 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1),
990 return simplify_gen_binary (MINUS
, mode
, temp
,
994 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
995 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
996 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
999 /* (neg (mult A B)) becomes (mult (neg A) B).
1000 This works even for floating-point values. */
1001 if (GET_CODE (op
) == MULT
1002 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1004 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1005 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
1008 /* NEG commutes with ASHIFT since it is multiplication. Only do
1009 this if we can then eliminate the NEG (e.g., if the operand
1011 if (GET_CODE (op
) == ASHIFT
)
1013 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0),
1016 return simplify_gen_binary (ASHIFT
, mode
, temp
,
1023 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1024 becomes just the MINUS if its mode is MODE. This allows
1025 folding switch statements on machines using casesi (such as
1027 if (GET_CODE (op
) == TRUNCATE
1028 && GET_MODE (XEXP (op
, 0)) == mode
1029 && GET_CODE (XEXP (op
, 0)) == MINUS
1030 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1031 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1032 return XEXP (op
, 0);
1034 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1035 if (! POINTERS_EXTEND_UNSIGNED
1036 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1038 || (GET_CODE (op
) == SUBREG
1039 && GET_CODE (SUBREG_REG (op
)) == REG
1040 && REG_POINTER (SUBREG_REG (op
))
1041 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1042 return convert_memory_address (Pmode
, op
);
1046 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1048 if (POINTERS_EXTEND_UNSIGNED
> 0
1049 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1051 || (GET_CODE (op
) == SUBREG
1052 && GET_CODE (SUBREG_REG (op
)) == REG
1053 && REG_POINTER (SUBREG_REG (op
))
1054 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1055 return convert_memory_address (Pmode
, op
);
1067 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1068 is a suitable integer or floating point immediate constant. */
1070 associative_constant_p (rtx op
)
1072 if (GET_CODE (op
) == CONST_INT
1073 || GET_CODE (op
) == CONST_DOUBLE
)
1075 op
= avoid_constant_pool_reference (op
);
1076 return GET_CODE (op
) == CONST_INT
1077 || GET_CODE (op
) == CONST_DOUBLE
;
1080 /* Subroutine of simplify_binary_operation to simplify an associative
1081 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1082 Return 0 if no simplification is possible. */
1084 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1089 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1090 if (GET_CODE (op0
) == code
1091 && associative_constant_p (op1
)
1092 && associative_constant_p (XEXP (op0
, 1)))
1094 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1097 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1100 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1101 if (GET_CODE (op0
) == code
1102 && GET_CODE (op1
) == code
1103 && associative_constant_p (XEXP (op0
, 1))
1104 && associative_constant_p (XEXP (op1
, 1)))
1106 rtx c
= simplify_binary_operation (code
, mode
,
1107 XEXP (op0
, 1), XEXP (op1
, 1));
1110 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1111 return simplify_gen_binary (code
, mode
, tem
, c
);
1114 /* Canonicalize (x op c) op y as (x op y) op c. */
1115 if (GET_CODE (op0
) == code
1116 && associative_constant_p (XEXP (op0
, 1)))
1118 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1119 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1122 /* Canonicalize x op (y op c) as (x op y) op c. */
1123 if (GET_CODE (op1
) == code
1124 && associative_constant_p (XEXP (op1
, 1)))
1126 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1127 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1133 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1134 and OP1. Return 0 if no simplification is possible.
1136 Don't use this for relational operations such as EQ or LT.
1137 Use simplify_relational_operation instead. */
1139 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1142 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
1144 unsigned int width
= GET_MODE_BITSIZE (mode
);
1146 rtx trueop0
= avoid_constant_pool_reference (op0
);
1147 rtx trueop1
= avoid_constant_pool_reference (op1
);
1149 /* Relational operations don't work here. We must know the mode
1150 of the operands in order to do the comparison correctly.
1151 Assuming a full word can give incorrect results.
1152 Consider comparing 128 with -128 in QImode. */
1154 if (GET_RTX_CLASS (code
) == '<')
1157 /* Make sure the constant is second. */
1158 if (GET_RTX_CLASS (code
) == 'c'
1159 && swap_commutative_operands_p (trueop0
, trueop1
))
1161 tem
= op0
, op0
= op1
, op1
= tem
;
1162 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
1165 if (VECTOR_MODE_P (mode
)
1166 && GET_CODE (trueop0
) == CONST_VECTOR
1167 && GET_CODE (trueop1
) == CONST_VECTOR
)
1169 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1170 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1171 enum machine_mode op0mode
= GET_MODE (trueop0
);
1172 int op0_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op0mode
));
1173 unsigned op0_n_elts
= (GET_MODE_SIZE (op0mode
) / op0_elt_size
);
1174 enum machine_mode op1mode
= GET_MODE (trueop1
);
1175 int op1_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op1mode
));
1176 unsigned op1_n_elts
= (GET_MODE_SIZE (op1mode
) / op1_elt_size
);
1177 rtvec v
= rtvec_alloc (n_elts
);
1180 if (op0_n_elts
!= n_elts
|| op1_n_elts
!= n_elts
)
1183 for (i
= 0; i
< n_elts
; i
++)
1185 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
1186 CONST_VECTOR_ELT (trueop0
, i
),
1187 CONST_VECTOR_ELT (trueop1
, i
));
1190 RTVEC_ELT (v
, i
) = x
;
1193 return gen_rtx_CONST_VECTOR (mode
, v
);
1196 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1197 && GET_CODE (trueop0
) == CONST_DOUBLE
1198 && GET_CODE (trueop1
) == CONST_DOUBLE
1199 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
1201 REAL_VALUE_TYPE f0
, f1
, value
;
1203 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
1204 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
1205 f0
= real_value_truncate (mode
, f0
);
1206 f1
= real_value_truncate (mode
, f1
);
1208 if (HONOR_SNANS (mode
)
1209 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
1213 && REAL_VALUES_EQUAL (f1
, dconst0
)
1214 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
1217 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
1219 value
= real_value_truncate (mode
, value
);
1220 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
1223 /* We can fold some multi-word operations. */
1224 if (GET_MODE_CLASS (mode
) == MODE_INT
1225 && width
== HOST_BITS_PER_WIDE_INT
* 2
1226 && (GET_CODE (trueop0
) == CONST_DOUBLE
1227 || GET_CODE (trueop0
) == CONST_INT
)
1228 && (GET_CODE (trueop1
) == CONST_DOUBLE
1229 || GET_CODE (trueop1
) == CONST_INT
))
1231 unsigned HOST_WIDE_INT l1
, l2
, lv
;
1232 HOST_WIDE_INT h1
, h2
, hv
;
1234 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
1235 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
1237 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
1239 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
1240 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
1242 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
1247 /* A - B == A + (-B). */
1248 neg_double (l2
, h2
, &lv
, &hv
);
1251 /* Fall through.... */
1254 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1258 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1261 case DIV
: case MOD
: case UDIV
: case UMOD
:
1262 /* We'd need to include tree.h to do this and it doesn't seem worth
1267 lv
= l1
& l2
, hv
= h1
& h2
;
1271 lv
= l1
| l2
, hv
= h1
| h2
;
1275 lv
= l1
^ l2
, hv
= h1
^ h2
;
1281 && ((unsigned HOST_WIDE_INT
) l1
1282 < (unsigned HOST_WIDE_INT
) l2
)))
1291 && ((unsigned HOST_WIDE_INT
) l1
1292 > (unsigned HOST_WIDE_INT
) l2
)))
1299 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
1301 && ((unsigned HOST_WIDE_INT
) l1
1302 < (unsigned HOST_WIDE_INT
) l2
)))
1309 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
1311 && ((unsigned HOST_WIDE_INT
) l1
1312 > (unsigned HOST_WIDE_INT
) l2
)))
1318 case LSHIFTRT
: case ASHIFTRT
:
1320 case ROTATE
: case ROTATERT
:
1321 #ifdef SHIFT_COUNT_TRUNCATED
1322 if (SHIFT_COUNT_TRUNCATED
)
1323 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
1326 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
1329 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
1330 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
1332 else if (code
== ASHIFT
)
1333 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
1334 else if (code
== ROTATE
)
1335 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1336 else /* code == ROTATERT */
1337 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1344 return immed_double_const (lv
, hv
, mode
);
1347 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
1348 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
1350 /* Even if we can't compute a constant result,
1351 there are some cases worth simplifying. */
1356 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1357 when x is NaN, infinite, or finite and nonzero. They aren't
1358 when x is -0 and the rounding mode is not towards -infinity,
1359 since (-0) + 0 is then 0. */
1360 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1363 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1364 transformations are safe even for IEEE. */
1365 if (GET_CODE (op0
) == NEG
)
1366 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1367 else if (GET_CODE (op1
) == NEG
)
1368 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1370 /* (~a) + 1 -> -a */
1371 if (INTEGRAL_MODE_P (mode
)
1372 && GET_CODE (op0
) == NOT
1373 && trueop1
== const1_rtx
)
1374 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1376 /* Handle both-operands-constant cases. We can only add
1377 CONST_INTs to constants since the sum of relocatable symbols
1378 can't be handled by most assemblers. Don't add CONST_INT
1379 to CONST_INT since overflow won't be computed properly if wider
1380 than HOST_BITS_PER_WIDE_INT. */
1382 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1383 && GET_CODE (op1
) == CONST_INT
)
1384 return plus_constant (op0
, INTVAL (op1
));
1385 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1386 && GET_CODE (op0
) == CONST_INT
)
1387 return plus_constant (op1
, INTVAL (op0
));
1389 /* See if this is something like X * C - X or vice versa or
1390 if the multiplication is written as a shift. If so, we can
1391 distribute and make a new multiply, shift, or maybe just
1392 have X (if C is 2 in the example above). But don't make
1393 real multiply if we didn't have one before. */
1395 if (! FLOAT_MODE_P (mode
))
1397 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1398 rtx lhs
= op0
, rhs
= op1
;
1401 if (GET_CODE (lhs
) == NEG
)
1402 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1403 else if (GET_CODE (lhs
) == MULT
1404 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1406 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1409 else if (GET_CODE (lhs
) == ASHIFT
1410 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1411 && INTVAL (XEXP (lhs
, 1)) >= 0
1412 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1414 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1415 lhs
= XEXP (lhs
, 0);
1418 if (GET_CODE (rhs
) == NEG
)
1419 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1420 else if (GET_CODE (rhs
) == MULT
1421 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1423 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1426 else if (GET_CODE (rhs
) == ASHIFT
1427 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1428 && INTVAL (XEXP (rhs
, 1)) >= 0
1429 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1431 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1432 rhs
= XEXP (rhs
, 0);
1435 if (rtx_equal_p (lhs
, rhs
))
1437 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1438 GEN_INT (coeff0
+ coeff1
));
1439 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1443 /* If one of the operands is a PLUS or a MINUS, see if we can
1444 simplify this by the associative law.
1445 Don't use the associative law for floating point.
1446 The inaccuracy makes it nonassociative,
1447 and subtle programs can break if operations are associated. */
1449 if (INTEGRAL_MODE_P (mode
)
1450 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1451 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1452 || (GET_CODE (op0
) == CONST
1453 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1454 || (GET_CODE (op1
) == CONST
1455 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1456 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1459 /* Reassociate floating point addition only when the user
1460 specifies unsafe math optimizations. */
1461 if (FLOAT_MODE_P (mode
)
1462 && flag_unsafe_math_optimizations
)
1464 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1472 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1473 using cc0, in which case we want to leave it as a COMPARE
1474 so we can distinguish it from a register-register-copy.
1476 In IEEE floating point, x-0 is not the same as x. */
1478 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1479 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1480 && trueop1
== CONST0_RTX (mode
))
1484 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1485 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1486 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1487 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1489 rtx xop00
= XEXP (op0
, 0);
1490 rtx xop10
= XEXP (op1
, 0);
1493 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1495 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1496 && GET_MODE (xop00
) == GET_MODE (xop10
)
1497 && REGNO (xop00
) == REGNO (xop10
)
1498 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1499 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1506 /* We can't assume x-x is 0 even with non-IEEE floating point,
1507 but since it is zero except in very strange circumstances, we
1508 will treat it as zero with -funsafe-math-optimizations. */
1509 if (rtx_equal_p (trueop0
, trueop1
)
1510 && ! side_effects_p (op0
)
1511 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1512 return CONST0_RTX (mode
);
1514 /* Change subtraction from zero into negation. (0 - x) is the
1515 same as -x when x is NaN, infinite, or finite and nonzero.
1516 But if the mode has signed zeros, and does not round towards
1517 -infinity, then 0 - 0 is 0, not -0. */
1518 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1519 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1521 /* (-1 - a) is ~a. */
1522 if (trueop0
== constm1_rtx
)
1523 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1525 /* Subtracting 0 has no effect unless the mode has signed zeros
1526 and supports rounding towards -infinity. In such a case,
1528 if (!(HONOR_SIGNED_ZEROS (mode
)
1529 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1530 && trueop1
== CONST0_RTX (mode
))
1533 /* See if this is something like X * C - X or vice versa or
1534 if the multiplication is written as a shift. If so, we can
1535 distribute and make a new multiply, shift, or maybe just
1536 have X (if C is 2 in the example above). But don't make
1537 real multiply if we didn't have one before. */
1539 if (! FLOAT_MODE_P (mode
))
1541 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1542 rtx lhs
= op0
, rhs
= op1
;
1545 if (GET_CODE (lhs
) == NEG
)
1546 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1547 else if (GET_CODE (lhs
) == MULT
1548 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1550 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1553 else if (GET_CODE (lhs
) == ASHIFT
1554 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1555 && INTVAL (XEXP (lhs
, 1)) >= 0
1556 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1558 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1559 lhs
= XEXP (lhs
, 0);
1562 if (GET_CODE (rhs
) == NEG
)
1563 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1564 else if (GET_CODE (rhs
) == MULT
1565 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1567 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1570 else if (GET_CODE (rhs
) == ASHIFT
1571 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1572 && INTVAL (XEXP (rhs
, 1)) >= 0
1573 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1575 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1576 rhs
= XEXP (rhs
, 0);
1579 if (rtx_equal_p (lhs
, rhs
))
1581 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1582 GEN_INT (coeff0
- coeff1
));
1583 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1587 /* (a - (-b)) -> (a + b). True even for IEEE. */
1588 if (GET_CODE (op1
) == NEG
)
1589 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1591 /* (-x - c) may be simplified as (-c - x). */
1592 if (GET_CODE (op0
) == NEG
1593 && (GET_CODE (op1
) == CONST_INT
1594 || GET_CODE (op1
) == CONST_DOUBLE
))
1596 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1598 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1601 /* If one of the operands is a PLUS or a MINUS, see if we can
1602 simplify this by the associative law.
1603 Don't use the associative law for floating point.
1604 The inaccuracy makes it nonassociative,
1605 and subtle programs can break if operations are associated. */
1607 if (INTEGRAL_MODE_P (mode
)
1608 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1609 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1610 || (GET_CODE (op0
) == CONST
1611 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1612 || (GET_CODE (op1
) == CONST
1613 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1614 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1617 /* Don't let a relocatable value get a negative coeff. */
1618 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1619 return simplify_gen_binary (PLUS
, mode
,
1621 neg_const_int (mode
, op1
));
1623 /* (x - (x & y)) -> (x & ~y) */
1624 if (GET_CODE (op1
) == AND
)
1626 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1628 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1629 GET_MODE (XEXP (op1
, 1)));
1630 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1632 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1634 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1635 GET_MODE (XEXP (op1
, 0)));
1636 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1642 if (trueop1
== constm1_rtx
)
1643 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1645 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1646 x is NaN, since x * 0 is then also NaN. Nor is it valid
1647 when the mode has signed zeros, since multiplying a negative
1648 number by 0 will give -0, not 0. */
1649 if (!HONOR_NANS (mode
)
1650 && !HONOR_SIGNED_ZEROS (mode
)
1651 && trueop1
== CONST0_RTX (mode
)
1652 && ! side_effects_p (op0
))
1655 /* In IEEE floating point, x*1 is not equivalent to x for
1657 if (!HONOR_SNANS (mode
)
1658 && trueop1
== CONST1_RTX (mode
))
1661 /* Convert multiply by constant power of two into shift unless
1662 we are still generating RTL. This test is a kludge. */
1663 if (GET_CODE (trueop1
) == CONST_INT
1664 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1665 /* If the mode is larger than the host word size, and the
1666 uppermost bit is set, then this isn't a power of two due
1667 to implicit sign extension. */
1668 && (width
<= HOST_BITS_PER_WIDE_INT
1669 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1670 && ! rtx_equal_function_value_matters
)
1671 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1673 /* x*2 is x+x and x*(-1) is -x */
1674 if (GET_CODE (trueop1
) == CONST_DOUBLE
1675 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1676 && GET_MODE (op0
) == mode
)
1679 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1681 if (REAL_VALUES_EQUAL (d
, dconst2
))
1682 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1684 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1685 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1688 /* Reassociate multiplication, but for floating point MULTs
1689 only when the user specifies unsafe math optimizations. */
1690 if (! FLOAT_MODE_P (mode
)
1691 || flag_unsafe_math_optimizations
)
1693 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1700 if (trueop1
== const0_rtx
)
1702 if (GET_CODE (trueop1
) == CONST_INT
1703 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1704 == GET_MODE_MASK (mode
)))
1706 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1708 /* A | (~A) -> -1 */
1709 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1710 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1711 && ! side_effects_p (op0
)
1712 && GET_MODE_CLASS (mode
) != MODE_CC
)
1714 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1720 if (trueop1
== const0_rtx
)
1722 if (GET_CODE (trueop1
) == CONST_INT
1723 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1724 == GET_MODE_MASK (mode
)))
1725 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1726 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1727 && GET_MODE_CLASS (mode
) != MODE_CC
)
1729 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1735 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1737 if (GET_CODE (trueop1
) == CONST_INT
1738 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1739 == GET_MODE_MASK (mode
)))
1741 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1742 && GET_MODE_CLASS (mode
) != MODE_CC
)
1745 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1746 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1747 && ! side_effects_p (op0
)
1748 && GET_MODE_CLASS (mode
) != MODE_CC
)
1750 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1756 /* Convert divide by power of two into shift (divide by 1 handled
1758 if (GET_CODE (trueop1
) == CONST_INT
1759 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1760 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (arg1
));
1762 /* Fall through.... */
1765 if (trueop1
== CONST1_RTX (mode
))
1767 /* On some platforms DIV uses narrower mode than its
1769 rtx x
= gen_lowpart_common (mode
, op0
);
1772 else if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1773 return gen_lowpart_SUBREG (mode
, op0
);
1778 /* Maybe change 0 / x to 0. This transformation isn't safe for
1779 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1780 Nor is it safe for modes with signed zeros, since dividing
1781 0 by a negative number gives -0, not 0. */
1782 if (!HONOR_NANS (mode
)
1783 && !HONOR_SIGNED_ZEROS (mode
)
1784 && trueop0
== CONST0_RTX (mode
)
1785 && ! side_effects_p (op1
))
1788 /* Change division by a constant into multiplication. Only do
1789 this with -funsafe-math-optimizations. */
1790 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1791 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1792 && trueop1
!= CONST0_RTX (mode
)
1793 && flag_unsafe_math_optimizations
)
1796 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1798 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1800 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1801 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1802 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
1808 /* Handle modulus by power of two (mod with 1 handled below). */
1809 if (GET_CODE (trueop1
) == CONST_INT
1810 && exact_log2 (INTVAL (trueop1
)) > 0)
1811 return simplify_gen_binary (AND
, mode
, op0
,
1812 GEN_INT (INTVAL (op1
) - 1));
1814 /* Fall through.... */
1817 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1818 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1825 /* Rotating ~0 always results in ~0. */
1826 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1827 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1828 && ! side_effects_p (op1
))
1831 /* Fall through.... */
1835 if (trueop1
== const0_rtx
)
1837 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1842 if (width
<= HOST_BITS_PER_WIDE_INT
1843 && GET_CODE (trueop1
) == CONST_INT
1844 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1845 && ! side_effects_p (op0
))
1847 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1849 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1855 if (width
<= HOST_BITS_PER_WIDE_INT
1856 && GET_CODE (trueop1
) == CONST_INT
1857 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1858 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1859 && ! side_effects_p (op0
))
1861 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1863 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1869 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1871 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1873 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1879 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1881 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1883 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1892 /* ??? There are simplifications that can be done. */
1896 if (!VECTOR_MODE_P (mode
))
1898 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1900 != GET_MODE_INNER (GET_MODE (trueop0
)))
1901 || GET_CODE (trueop1
) != PARALLEL
1902 || XVECLEN (trueop1
, 0) != 1
1903 || GET_CODE (XVECEXP (trueop1
, 0, 0)) != CONST_INT
)
1906 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1907 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP (trueop1
, 0, 0)));
1911 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1912 || (GET_MODE_INNER (mode
)
1913 != GET_MODE_INNER (GET_MODE (trueop0
)))
1914 || GET_CODE (trueop1
) != PARALLEL
)
1917 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1919 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1920 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1921 rtvec v
= rtvec_alloc (n_elts
);
1924 if (XVECLEN (trueop1
, 0) != (int) n_elts
)
1926 for (i
= 0; i
< n_elts
; i
++)
1928 rtx x
= XVECEXP (trueop1
, 0, i
);
1930 if (GET_CODE (x
) != CONST_INT
)
1932 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, INTVAL (x
));
1935 return gen_rtx_CONST_VECTOR (mode
, v
);
1941 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
1942 ? GET_MODE (trueop0
)
1943 : GET_MODE_INNER (mode
));
1944 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
1945 ? GET_MODE (trueop1
)
1946 : GET_MODE_INNER (mode
));
1948 if (!VECTOR_MODE_P (mode
)
1949 || (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
1950 != GET_MODE_SIZE (mode
)))
1953 if ((VECTOR_MODE_P (op0_mode
)
1954 && (GET_MODE_INNER (mode
)
1955 != GET_MODE_INNER (op0_mode
)))
1956 || (!VECTOR_MODE_P (op0_mode
)
1957 && GET_MODE_INNER (mode
) != op0_mode
))
1960 if ((VECTOR_MODE_P (op1_mode
)
1961 && (GET_MODE_INNER (mode
)
1962 != GET_MODE_INNER (op1_mode
)))
1963 || (!VECTOR_MODE_P (op1_mode
)
1964 && GET_MODE_INNER (mode
) != op1_mode
))
1967 if ((GET_CODE (trueop0
) == CONST_VECTOR
1968 || GET_CODE (trueop0
) == CONST_INT
1969 || GET_CODE (trueop0
) == CONST_DOUBLE
)
1970 && (GET_CODE (trueop1
) == CONST_VECTOR
1971 || GET_CODE (trueop1
) == CONST_INT
1972 || GET_CODE (trueop1
) == CONST_DOUBLE
))
1974 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1975 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1976 rtvec v
= rtvec_alloc (n_elts
);
1978 unsigned in_n_elts
= 1;
1980 if (VECTOR_MODE_P (op0_mode
))
1981 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
1982 for (i
= 0; i
< n_elts
; i
++)
1986 if (!VECTOR_MODE_P (op0_mode
))
1987 RTVEC_ELT (v
, i
) = trueop0
;
1989 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
1993 if (!VECTOR_MODE_P (op1_mode
))
1994 RTVEC_ELT (v
, i
) = trueop1
;
1996 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2001 return gen_rtx_CONST_VECTOR (mode
, v
);
2013 /* Get the integer argument values in two forms:
2014 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2016 arg0
= INTVAL (trueop0
);
2017 arg1
= INTVAL (trueop1
);
2019 if (width
< HOST_BITS_PER_WIDE_INT
)
2021 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2022 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2025 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2026 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2029 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2030 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2038 /* Compute the value of the arithmetic. */
2043 val
= arg0s
+ arg1s
;
2047 val
= arg0s
- arg1s
;
2051 val
= arg0s
* arg1s
;
2056 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2059 val
= arg0s
/ arg1s
;
2064 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2067 val
= arg0s
% arg1s
;
2072 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2075 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
2080 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2083 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
2099 /* If shift count is undefined, don't fold it; let the machine do
2100 what it wants. But truncate it if the machine will do that. */
2104 #ifdef SHIFT_COUNT_TRUNCATED
2105 if (SHIFT_COUNT_TRUNCATED
)
2109 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
2116 #ifdef SHIFT_COUNT_TRUNCATED
2117 if (SHIFT_COUNT_TRUNCATED
)
2121 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
2128 #ifdef SHIFT_COUNT_TRUNCATED
2129 if (SHIFT_COUNT_TRUNCATED
)
2133 val
= arg0s
>> arg1
;
2135 /* Bootstrap compiler may not have sign extended the right shift.
2136 Manually extend the sign to insure bootstrap cc matches gcc. */
2137 if (arg0s
< 0 && arg1
> 0)
2138 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
2147 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
2148 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
2156 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
2157 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
2161 /* Do nothing here. */
2165 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
2169 val
= ((unsigned HOST_WIDE_INT
) arg0
2170 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2174 val
= arg0s
> arg1s
? arg0s
: arg1s
;
2178 val
= ((unsigned HOST_WIDE_INT
) arg0
2179 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2186 /* ??? There are simplifications that can be done. */
2193 val
= trunc_int_for_mode (val
, mode
);
2195 return GEN_INT (val
);
2198 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2201 Rather than test for specific case, we do this by a brute-force method
2202 and do all possible simplifications until no more changes occur. Then
2203 we rebuild the operation.
2205 If FORCE is true, then always generate the rtx. This is used to
2206 canonicalize stuff emitted from simplify_gen_binary. Note that this
2207 can still fail if the rtx is too complex. It won't fail just because
2208 the result is not 'simpler' than the input, however. */
2210 struct simplify_plus_minus_op_data
2217 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
2219 const struct simplify_plus_minus_op_data
*d1
= p1
;
2220 const struct simplify_plus_minus_op_data
*d2
= p2
;
2222 return (commutative_operand_precedence (d2
->op
)
2223 - commutative_operand_precedence (d1
->op
));
2227 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
2230 struct simplify_plus_minus_op_data ops
[8];
2232 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
;
2233 int first
, negate
, changed
;
2236 memset (ops
, 0, sizeof ops
);
2238 /* Set up the two operands and then expand them until nothing has been
2239 changed. If we run out of room in our array, give up; this should
2240 almost never happen. */
2245 ops
[1].neg
= (code
== MINUS
);
2251 for (i
= 0; i
< n_ops
; i
++)
2253 rtx this_op
= ops
[i
].op
;
2254 int this_neg
= ops
[i
].neg
;
2255 enum rtx_code this_code
= GET_CODE (this_op
);
2264 ops
[n_ops
].op
= XEXP (this_op
, 1);
2265 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
2268 ops
[i
].op
= XEXP (this_op
, 0);
2274 ops
[i
].op
= XEXP (this_op
, 0);
2275 ops
[i
].neg
= ! this_neg
;
2281 && GET_CODE (XEXP (this_op
, 0)) == PLUS
2282 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
2283 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
2285 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
2286 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
2287 ops
[n_ops
].neg
= this_neg
;
2295 /* ~a -> (-a - 1) */
2298 ops
[n_ops
].op
= constm1_rtx
;
2299 ops
[n_ops
++].neg
= this_neg
;
2300 ops
[i
].op
= XEXP (this_op
, 0);
2301 ops
[i
].neg
= !this_neg
;
2309 ops
[i
].op
= neg_const_int (mode
, this_op
);
2322 /* If we only have two operands, we can't do anything. */
2323 if (n_ops
<= 2 && !force
)
2326 /* Count the number of CONSTs we didn't split above. */
2327 for (i
= 0; i
< n_ops
; i
++)
2328 if (GET_CODE (ops
[i
].op
) == CONST
)
2331 /* Now simplify each pair of operands until nothing changes. The first
2332 time through just simplify constants against each other. */
2339 for (i
= 0; i
< n_ops
- 1; i
++)
2340 for (j
= i
+ 1; j
< n_ops
; j
++)
2342 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
2343 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
2345 if (lhs
!= 0 && rhs
!= 0
2346 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
2348 enum rtx_code ncode
= PLUS
;
2354 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2356 else if (swap_commutative_operands_p (lhs
, rhs
))
2357 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2359 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
2361 /* Reject "simplifications" that just wrap the two
2362 arguments in a CONST. Failure to do so can result
2363 in infinite recursion with simplify_binary_operation
2364 when it calls us to simplify CONST operations. */
2366 && ! (GET_CODE (tem
) == CONST
2367 && GET_CODE (XEXP (tem
, 0)) == ncode
2368 && XEXP (XEXP (tem
, 0), 0) == lhs
2369 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2370 /* Don't allow -x + -1 -> ~x simplifications in the
2371 first pass. This allows us the chance to combine
2372 the -1 with other constants. */
2374 && GET_CODE (tem
) == NOT
2375 && XEXP (tem
, 0) == rhs
))
2378 if (GET_CODE (tem
) == NEG
)
2379 tem
= XEXP (tem
, 0), lneg
= !lneg
;
2380 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
2381 tem
= neg_const_int (mode
, tem
), lneg
= 0;
2385 ops
[j
].op
= NULL_RTX
;
2395 /* Pack all the operands to the lower-numbered entries. */
2396 for (i
= 0, j
= 0; j
< n_ops
; j
++)
2401 /* Sort the operations based on swap_commutative_operands_p. */
2402 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
2404 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2406 && GET_CODE (ops
[1].op
) == CONST_INT
2407 && CONSTANT_P (ops
[0].op
)
2409 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
2411 /* We suppressed creation of trivial CONST expressions in the
2412 combination loop to avoid recursion. Create one manually now.
2413 The combination loop should have ensured that there is exactly
2414 one CONST_INT, and the sort will have ensured that it is last
2415 in the array and that any other constant will be next-to-last. */
2418 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
2419 && CONSTANT_P (ops
[n_ops
- 2].op
))
2421 rtx value
= ops
[n_ops
- 1].op
;
2422 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
2423 value
= neg_const_int (mode
, value
);
2424 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
2428 /* Count the number of CONSTs that we generated. */
2430 for (i
= 0; i
< n_ops
; i
++)
2431 if (GET_CODE (ops
[i
].op
) == CONST
)
2434 /* Give up if we didn't reduce the number of operands we had. Make
2435 sure we count a CONST as two operands. If we have the same
2436 number of operands, but have made more CONSTs than before, this
2437 is also an improvement, so accept it. */
2439 && (n_ops
+ n_consts
> input_ops
2440 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
2443 /* Put a non-negated operand first. If there aren't any, make all
2444 operands positive and negate the whole thing later. */
2447 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
2451 for (i
= 0; i
< n_ops
; i
++)
2463 /* Now make the result by performing the requested operations. */
2465 for (i
= 1; i
< n_ops
; i
++)
2466 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
2467 mode
, result
, ops
[i
].op
);
2469 return negate
? gen_rtx_NEG (mode
, result
) : result
;
2472 /* Like simplify_binary_operation except used for relational operators.
2473 MODE is the mode of the operands, not that of the result. If MODE
2474 is VOIDmode, both operands must also be VOIDmode and we compare the
2475 operands in "infinite precision".
2477 If no simplification is possible, this function returns zero. Otherwise,
2478 it returns either const_true_rtx or const0_rtx. */
2481 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
2484 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
2489 if (mode
== VOIDmode
2490 && (GET_MODE (op0
) != VOIDmode
2491 || GET_MODE (op1
) != VOIDmode
))
2494 /* If op0 is a compare, extract the comparison arguments from it. */
2495 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
2496 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
2498 trueop0
= avoid_constant_pool_reference (op0
);
2499 trueop1
= avoid_constant_pool_reference (op1
);
2501 /* We can't simplify MODE_CC values since we don't know what the
2502 actual comparison is. */
2503 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
2506 /* Make sure the constant is second. */
2507 if (swap_commutative_operands_p (trueop0
, trueop1
))
2509 tem
= op0
, op0
= op1
, op1
= tem
;
2510 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
2511 code
= swap_condition (code
);
2514 /* For integer comparisons of A and B maybe we can simplify A - B and can
2515 then simplify a comparison of that with zero. If A and B are both either
2516 a register or a CONST_INT, this can't help; testing for these cases will
2517 prevent infinite recursion here and speed things up.
2519 If CODE is an unsigned comparison, then we can never do this optimization,
2520 because it gives an incorrect result if the subtraction wraps around zero.
2521 ANSI C defines unsigned operations such that they never overflow, and
2522 thus such cases can not be ignored. */
2524 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
2525 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (trueop0
) == CONST_INT
)
2526 && (GET_CODE (op1
) == REG
|| GET_CODE (trueop1
) == CONST_INT
))
2527 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
2528 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
2529 return simplify_relational_operation (signed_condition (code
),
2530 mode
, tem
, const0_rtx
);
2532 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
2533 return const_true_rtx
;
2535 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
2538 /* For modes without NaNs, if the two operands are equal, we know the
2539 result except if they have side-effects. */
2540 if (! HONOR_NANS (GET_MODE (trueop0
))
2541 && rtx_equal_p (trueop0
, trueop1
)
2542 && ! side_effects_p (trueop0
))
2543 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
2545 /* If the operands are floating-point constants, see if we can fold
2547 else if (GET_CODE (trueop0
) == CONST_DOUBLE
2548 && GET_CODE (trueop1
) == CONST_DOUBLE
2549 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
2551 REAL_VALUE_TYPE d0
, d1
;
2553 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
2554 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
2556 /* Comparisons are unordered iff at least one of the values is NaN. */
2557 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
2567 return const_true_rtx
;
2580 equal
= REAL_VALUES_EQUAL (d0
, d1
);
2581 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
2582 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
2585 /* Otherwise, see if the operands are both integers. */
2586 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
2587 && (GET_CODE (trueop0
) == CONST_DOUBLE
2588 || GET_CODE (trueop0
) == CONST_INT
)
2589 && (GET_CODE (trueop1
) == CONST_DOUBLE
2590 || GET_CODE (trueop1
) == CONST_INT
))
2592 int width
= GET_MODE_BITSIZE (mode
);
2593 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
2594 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
2596 /* Get the two words comprising each integer constant. */
2597 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
2599 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
2600 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
2604 l0u
= l0s
= INTVAL (trueop0
);
2605 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
2608 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
2610 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
2611 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
2615 l1u
= l1s
= INTVAL (trueop1
);
2616 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
2619 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2620 we have to sign or zero-extend the values. */
2621 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
2623 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2624 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2626 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2627 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2629 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2630 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2632 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
2633 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
2635 equal
= (h0u
== h1u
&& l0u
== l1u
);
2636 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
2637 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
2638 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
2639 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
2642 /* Otherwise, there are some code-specific tests we can make. */
2648 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
2653 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
2654 return const_true_rtx
;
2658 /* Unsigned values are never negative. */
2659 if (trueop1
== const0_rtx
)
2660 return const_true_rtx
;
2664 if (trueop1
== const0_rtx
)
2669 /* Unsigned values are never greater than the largest
2671 if (GET_CODE (trueop1
) == CONST_INT
2672 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2673 && INTEGRAL_MODE_P (mode
))
2674 return const_true_rtx
;
2678 if (GET_CODE (trueop1
) == CONST_INT
2679 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2680 && INTEGRAL_MODE_P (mode
))
2685 /* Optimize abs(x) < 0.0. */
2686 if (trueop1
== CONST0_RTX (mode
) && !HONOR_SNANS (mode
))
2688 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2690 if (GET_CODE (tem
) == ABS
)
2696 /* Optimize abs(x) >= 0.0. */
2697 if (trueop1
== CONST0_RTX (mode
) && !HONOR_NANS (mode
))
2699 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2701 if (GET_CODE (tem
) == ABS
)
2702 return const_true_rtx
;
2707 /* Optimize ! (abs(x) < 0.0). */
2708 if (trueop1
== CONST0_RTX (mode
))
2710 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2712 if (GET_CODE (tem
) == ABS
)
2713 return const_true_rtx
;
2724 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2730 return equal
? const_true_rtx
: const0_rtx
;
2733 return ! equal
? const_true_rtx
: const0_rtx
;
2736 return op0lt
? const_true_rtx
: const0_rtx
;
2739 return op1lt
? const_true_rtx
: const0_rtx
;
2741 return op0ltu
? const_true_rtx
: const0_rtx
;
2743 return op1ltu
? const_true_rtx
: const0_rtx
;
2746 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
2749 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
2751 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
2753 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
2755 return const_true_rtx
;
2763 /* Simplify CODE, an operation with result mode MODE and three operands,
2764 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2765 a constant. Return 0 if no simplifications is possible. */
2768 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
2769 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
2772 unsigned int width
= GET_MODE_BITSIZE (mode
);
2774 /* VOIDmode means "infinite" precision. */
2776 width
= HOST_BITS_PER_WIDE_INT
;
2782 if (GET_CODE (op0
) == CONST_INT
2783 && GET_CODE (op1
) == CONST_INT
2784 && GET_CODE (op2
) == CONST_INT
2785 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
2786 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
2788 /* Extracting a bit-field from a constant */
2789 HOST_WIDE_INT val
= INTVAL (op0
);
2791 if (BITS_BIG_ENDIAN
)
2792 val
>>= (GET_MODE_BITSIZE (op0_mode
)
2793 - INTVAL (op2
) - INTVAL (op1
));
2795 val
>>= INTVAL (op2
);
2797 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
2799 /* First zero-extend. */
2800 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
2801 /* If desired, propagate sign bit. */
2802 if (code
== SIGN_EXTRACT
2803 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
2804 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
2807 /* Clear the bits that don't belong in our mode,
2808 unless they and our sign bit are all one.
2809 So we get either a reasonable negative value or a reasonable
2810 unsigned value for this mode. */
2811 if (width
< HOST_BITS_PER_WIDE_INT
2812 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
2813 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
2814 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2816 return GEN_INT (val
);
2821 if (GET_CODE (op0
) == CONST_INT
)
2822 return op0
!= const0_rtx
? op1
: op2
;
2824 /* Convert c ? a : a into "a". */
2825 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
2828 /* Convert a != b ? a : b into "a". */
2829 if (GET_CODE (op0
) == NE
2830 && ! side_effects_p (op0
)
2831 && ! HONOR_NANS (mode
)
2832 && ! HONOR_SIGNED_ZEROS (mode
)
2833 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
2834 && rtx_equal_p (XEXP (op0
, 1), op2
))
2835 || (rtx_equal_p (XEXP (op0
, 0), op2
)
2836 && rtx_equal_p (XEXP (op0
, 1), op1
))))
2839 /* Convert a == b ? a : b into "b". */
2840 if (GET_CODE (op0
) == EQ
2841 && ! side_effects_p (op0
)
2842 && ! HONOR_NANS (mode
)
2843 && ! HONOR_SIGNED_ZEROS (mode
)
2844 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
2845 && rtx_equal_p (XEXP (op0
, 1), op2
))
2846 || (rtx_equal_p (XEXP (op0
, 0), op2
)
2847 && rtx_equal_p (XEXP (op0
, 1), op1
))))
2850 if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
2852 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
2853 ? GET_MODE (XEXP (op0
, 1))
2854 : GET_MODE (XEXP (op0
, 0)));
2856 if (cmp_mode
== VOIDmode
)
2857 cmp_mode
= op0_mode
;
2858 temp
= simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
2859 XEXP (op0
, 0), XEXP (op0
, 1));
2861 /* See if any simplifications were possible. */
2862 if (temp
== const0_rtx
)
2864 else if (temp
== const_true_rtx
)
2869 /* Look for happy constants in op1 and op2. */
2870 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2872 HOST_WIDE_INT t
= INTVAL (op1
);
2873 HOST_WIDE_INT f
= INTVAL (op2
);
2875 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2876 code
= GET_CODE (op0
);
2877 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
2880 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
2888 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
2894 if (GET_MODE (op0
) != mode
2895 || GET_MODE (op1
) != mode
2896 || !VECTOR_MODE_P (mode
))
2898 op2
= avoid_constant_pool_reference (op2
);
2899 if (GET_CODE (op2
) == CONST_INT
)
2901 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2902 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2903 int mask
= (1 << n_elts
) - 1;
2905 if (!(INTVAL (op2
) & mask
))
2907 if ((INTVAL (op2
) & mask
) == mask
)
2910 op0
= avoid_constant_pool_reference (op0
);
2911 op1
= avoid_constant_pool_reference (op1
);
2912 if (GET_CODE (op0
) == CONST_VECTOR
2913 && GET_CODE (op1
) == CONST_VECTOR
)
2915 rtvec v
= rtvec_alloc (n_elts
);
2918 for (i
= 0; i
< n_elts
; i
++)
2919 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
2920 ? CONST_VECTOR_ELT (op0
, i
)
2921 : CONST_VECTOR_ELT (op1
, i
));
2922 return gen_rtx_CONST_VECTOR (mode
, v
);
2934 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2935 Return 0 if no simplifications is possible. */
2937 simplify_subreg (enum machine_mode outermode
, rtx op
,
2938 enum machine_mode innermode
, unsigned int byte
)
2940 /* Little bit of sanity checking. */
2941 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2942 || innermode
== BLKmode
|| outermode
== BLKmode
)
2945 if (GET_MODE (op
) != innermode
2946 && GET_MODE (op
) != VOIDmode
)
2949 if (byte
% GET_MODE_SIZE (outermode
)
2950 || byte
>= GET_MODE_SIZE (innermode
))
2953 if (outermode
== innermode
&& !byte
)
2956 /* Simplify subregs of vector constants. */
2957 if (GET_CODE (op
) == CONST_VECTOR
)
2959 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (innermode
));
2960 const unsigned int offset
= byte
/ elt_size
;
2963 if (GET_MODE_INNER (innermode
) == outermode
)
2965 elt
= CONST_VECTOR_ELT (op
, offset
);
2967 /* ?? We probably don't need this copy_rtx because constants
2968 can be shared. ?? */
2970 return copy_rtx (elt
);
2972 else if (GET_MODE_INNER (innermode
) == GET_MODE_INNER (outermode
)
2973 && GET_MODE_SIZE (innermode
) > GET_MODE_SIZE (outermode
))
2975 return (gen_rtx_CONST_VECTOR
2977 gen_rtvec_v (GET_MODE_NUNITS (outermode
),
2978 &CONST_VECTOR_ELT (op
, offset
))));
2980 else if (GET_MODE_CLASS (outermode
) == MODE_INT
2981 && (GET_MODE_SIZE (outermode
) % elt_size
== 0))
2983 /* This happens when the target register size is smaller then
2984 the vector mode, and we synthesize operations with vectors
2985 of elements that are smaller than the register size. */
2986 HOST_WIDE_INT sum
= 0, high
= 0;
2987 unsigned n_elts
= (GET_MODE_SIZE (outermode
) / elt_size
);
2988 unsigned i
= BYTES_BIG_ENDIAN
? offset
: offset
+ n_elts
- 1;
2989 unsigned step
= BYTES_BIG_ENDIAN
? 1 : -1;
2990 int shift
= BITS_PER_UNIT
* elt_size
;
2991 unsigned HOST_WIDE_INT unit_mask
;
2993 unit_mask
= (unsigned HOST_WIDE_INT
) -1
2994 >> (sizeof (HOST_WIDE_INT
) * BITS_PER_UNIT
- shift
);
2996 for (; n_elts
--; i
+= step
)
2998 elt
= CONST_VECTOR_ELT (op
, i
);
2999 if (GET_CODE (elt
) == CONST_DOUBLE
3000 && GET_MODE_CLASS (GET_MODE (elt
)) == MODE_FLOAT
)
3002 elt
= gen_lowpart_common (int_mode_for_mode (GET_MODE (elt
)),
3007 if (GET_CODE (elt
) != CONST_INT
)
3009 /* Avoid overflow. */
3010 if (high
>> (HOST_BITS_PER_WIDE_INT
- shift
))
3012 high
= high
<< shift
| sum
>> (HOST_BITS_PER_WIDE_INT
- shift
);
3013 sum
= (sum
<< shift
) + (INTVAL (elt
) & unit_mask
);
3015 if (GET_MODE_BITSIZE (outermode
) <= HOST_BITS_PER_WIDE_INT
)
3016 return GEN_INT (trunc_int_for_mode (sum
, outermode
));
3017 else if (GET_MODE_BITSIZE (outermode
) == 2* HOST_BITS_PER_WIDE_INT
)
3018 return immed_double_const (sum
, high
, outermode
);
3022 else if (GET_MODE_CLASS (outermode
) == MODE_INT
3023 && (elt_size
% GET_MODE_SIZE (outermode
) == 0))
3025 enum machine_mode new_mode
3026 = int_mode_for_mode (GET_MODE_INNER (innermode
));
3027 int subbyte
= byte
% elt_size
;
3029 op
= simplify_subreg (new_mode
, op
, innermode
, byte
- subbyte
);
3032 return simplify_subreg (outermode
, op
, new_mode
, subbyte
);
3034 else if (GET_MODE_CLASS (outermode
) == MODE_INT
)
3035 /* This shouldn't happen, but let's not do anything stupid. */
3039 /* Attempt to simplify constant to non-SUBREG expression. */
3040 if (CONSTANT_P (op
))
3043 unsigned HOST_WIDE_INT val
= 0;
3045 if (VECTOR_MODE_P (outermode
))
3047 /* Construct a CONST_VECTOR from individual subregs. */
3048 enum machine_mode submode
= GET_MODE_INNER (outermode
);
3049 int subsize
= GET_MODE_UNIT_SIZE (outermode
);
3050 int i
, elts
= GET_MODE_NUNITS (outermode
);
3051 rtvec v
= rtvec_alloc (elts
);
3054 for (i
= 0; i
< elts
; i
++, byte
+= subsize
)
3056 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
3057 /* ??? It would be nice if we could actually make such subregs
3058 on targets that allow such relocations. */
3059 if (byte
>= GET_MODE_SIZE (innermode
))
3060 elt
= CONST0_RTX (submode
);
3062 elt
= simplify_subreg (submode
, op
, innermode
, byte
);
3065 RTVEC_ELT (v
, i
) = elt
;
3067 return gen_rtx_CONST_VECTOR (outermode
, v
);
3070 /* ??? This code is partly redundant with code below, but can handle
3071 the subregs of floats and similar corner cases.
3072 Later it we should move all simplification code here and rewrite
3073 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
3074 using SIMPLIFY_SUBREG. */
3075 if (subreg_lowpart_offset (outermode
, innermode
) == byte
3076 && GET_CODE (op
) != CONST_VECTOR
)
3078 rtx
new = gen_lowpart_if_possible (outermode
, op
);
3083 /* Similar comment as above apply here. */
3084 if (GET_MODE_SIZE (outermode
) == UNITS_PER_WORD
3085 && GET_MODE_SIZE (innermode
) > UNITS_PER_WORD
3086 && GET_MODE_CLASS (outermode
) == MODE_INT
)
3088 rtx
new = constant_subword (op
,
3089 (byte
/ UNITS_PER_WORD
),
3095 if (GET_MODE_CLASS (outermode
) != MODE_INT
3096 && GET_MODE_CLASS (outermode
) != MODE_CC
)
3098 enum machine_mode new_mode
= int_mode_for_mode (outermode
);
3100 if (new_mode
!= innermode
|| byte
!= 0)
3102 op
= simplify_subreg (new_mode
, op
, innermode
, byte
);
3105 return simplify_subreg (outermode
, op
, new_mode
, 0);
3109 offset
= byte
* BITS_PER_UNIT
;
3110 switch (GET_CODE (op
))
3113 if (GET_MODE (op
) != VOIDmode
)
3116 /* We can't handle this case yet. */
3117 if (GET_MODE_BITSIZE (outermode
) >= HOST_BITS_PER_WIDE_INT
)
3120 part
= offset
>= HOST_BITS_PER_WIDE_INT
;
3121 if ((BITS_PER_WORD
> HOST_BITS_PER_WIDE_INT
3122 && BYTES_BIG_ENDIAN
)
3123 || (BITS_PER_WORD
<= HOST_BITS_PER_WIDE_INT
3124 && WORDS_BIG_ENDIAN
))
3126 val
= part
? CONST_DOUBLE_HIGH (op
) : CONST_DOUBLE_LOW (op
);
3127 offset
%= HOST_BITS_PER_WIDE_INT
;
3129 /* We've already picked the word we want from a double, so
3130 pretend this is actually an integer. */
3131 innermode
= mode_for_size (HOST_BITS_PER_WIDE_INT
, MODE_INT
, 0);
3135 if (GET_CODE (op
) == CONST_INT
)
3138 /* We don't handle synthesizing of non-integral constants yet. */
3139 if (GET_MODE_CLASS (outermode
) != MODE_INT
)
3142 if (BYTES_BIG_ENDIAN
|| WORDS_BIG_ENDIAN
)
3144 if (WORDS_BIG_ENDIAN
)
3145 offset
= (GET_MODE_BITSIZE (innermode
)
3146 - GET_MODE_BITSIZE (outermode
) - offset
);
3147 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
3148 && GET_MODE_SIZE (outermode
) < UNITS_PER_WORD
)
3149 offset
= (offset
+ BITS_PER_WORD
- GET_MODE_BITSIZE (outermode
)
3150 - 2 * (offset
% BITS_PER_WORD
));
3153 if (offset
>= HOST_BITS_PER_WIDE_INT
)
3154 return ((HOST_WIDE_INT
) val
< 0) ? constm1_rtx
: const0_rtx
;
3158 if (GET_MODE_BITSIZE (outermode
) < HOST_BITS_PER_WIDE_INT
)
3159 val
= trunc_int_for_mode (val
, outermode
);
3160 return GEN_INT (val
);
3167 /* Changing mode twice with SUBREG => just change it once,
3168 or not at all if changing back op starting mode. */
3169 if (GET_CODE (op
) == SUBREG
)
3171 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3172 int final_offset
= byte
+ SUBREG_BYTE (op
);
3175 if (outermode
== innermostmode
3176 && byte
== 0 && SUBREG_BYTE (op
) == 0)
3177 return SUBREG_REG (op
);
3179 /* The SUBREG_BYTE represents offset, as if the value were stored
3180 in memory. Irritating exception is paradoxical subreg, where
3181 we define SUBREG_BYTE to be 0. On big endian machines, this
3182 value should be negative. For a moment, undo this exception. */
3183 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
3185 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
3186 if (WORDS_BIG_ENDIAN
)
3187 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3188 if (BYTES_BIG_ENDIAN
)
3189 final_offset
+= difference
% UNITS_PER_WORD
;
3191 if (SUBREG_BYTE (op
) == 0
3192 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
3194 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
3195 if (WORDS_BIG_ENDIAN
)
3196 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3197 if (BYTES_BIG_ENDIAN
)
3198 final_offset
+= difference
% UNITS_PER_WORD
;
3201 /* See whether resulting subreg will be paradoxical. */
3202 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
3204 /* In nonparadoxical subregs we can't handle negative offsets. */
3205 if (final_offset
< 0)
3207 /* Bail out in case resulting subreg would be incorrect. */
3208 if (final_offset
% GET_MODE_SIZE (outermode
)
3209 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
3215 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
3217 /* In paradoxical subreg, see if we are still looking on lower part.
3218 If so, our SUBREG_BYTE will be 0. */
3219 if (WORDS_BIG_ENDIAN
)
3220 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3221 if (BYTES_BIG_ENDIAN
)
3222 offset
+= difference
% UNITS_PER_WORD
;
3223 if (offset
== final_offset
)
3229 /* Recurse for further possible simplifications. */
3230 new = simplify_subreg (outermode
, SUBREG_REG (op
),
3231 GET_MODE (SUBREG_REG (op
)),
3235 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
3238 /* SUBREG of a hard register => just change the register number
3239 and/or mode. If the hard register is not valid in that mode,
3240 suppress this simplification. If the hard register is the stack,
3241 frame, or argument pointer, leave this as a SUBREG. */
3244 && (! REG_FUNCTION_VALUE_P (op
)
3245 || ! rtx_equal_function_value_matters
)
3246 && REGNO (op
) < FIRST_PSEUDO_REGISTER
3247 #ifdef CANNOT_CHANGE_MODE_CLASS
3248 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op
), innermode
, outermode
)
3249 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
3250 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
)
3252 && ((reload_completed
&& !frame_pointer_needed
)
3253 || (REGNO (op
) != FRAME_POINTER_REGNUM
3254 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3255 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
3258 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3259 && REGNO (op
) != ARG_POINTER_REGNUM
3261 && REGNO (op
) != STACK_POINTER_REGNUM
3262 && subreg_offset_representable_p (REGNO (op
), innermode
,
3265 rtx tem
= gen_rtx_SUBREG (outermode
, op
, byte
);
3266 int final_regno
= subreg_hard_regno (tem
, 0);
3268 /* ??? We do allow it if the current REG is not valid for
3269 its mode. This is a kludge to work around how float/complex
3270 arguments are passed on 32-bit SPARC and should be fixed. */
3271 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
3272 || ! HARD_REGNO_MODE_OK (REGNO (op
), innermode
))
3274 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, byte
);
3276 /* Propagate original regno. We don't have any way to specify
3277 the offset inside original regno, so do so only for lowpart.
3278 The information is used only by alias analysis that can not
3279 grog partial register anyway. */
3281 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
3282 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
3287 /* If we have a SUBREG of a register that we are replacing and we are
3288 replacing it with a MEM, make a new MEM and try replacing the
3289 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3290 or if we would be widening it. */
3292 if (GET_CODE (op
) == MEM
3293 && ! mode_dependent_address_p (XEXP (op
, 0))
3294 /* Allow splitting of volatile memory references in case we don't
3295 have instruction to move the whole thing. */
3296 && (! MEM_VOLATILE_P (op
)
3297 || ! have_insn_for (SET
, innermode
))
3298 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
3299 return adjust_address_nv (op
, outermode
, byte
);
3301 /* Handle complex values represented as CONCAT
3302 of real and imaginary part. */
3303 if (GET_CODE (op
) == CONCAT
)
3305 int is_realpart
= byte
< (unsigned int) GET_MODE_UNIT_SIZE (innermode
);
3306 rtx part
= is_realpart
? XEXP (op
, 0) : XEXP (op
, 1);
3307 unsigned int final_offset
;
3310 final_offset
= byte
% (GET_MODE_UNIT_SIZE (innermode
));
3311 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
3314 /* We can at least simplify it by referring directly to the relevant part. */
3315 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
3320 /* Make a SUBREG operation or equivalent if it folds. */
3323 simplify_gen_subreg (enum machine_mode outermode
, rtx op
,
3324 enum machine_mode innermode
, unsigned int byte
)
3327 /* Little bit of sanity checking. */
3328 if (innermode
== VOIDmode
|| outermode
== VOIDmode
3329 || innermode
== BLKmode
|| outermode
== BLKmode
)
3332 if (GET_MODE (op
) != innermode
3333 && GET_MODE (op
) != VOIDmode
)
3336 if (byte
% GET_MODE_SIZE (outermode
)
3337 || byte
>= GET_MODE_SIZE (innermode
))
3340 if (GET_CODE (op
) == QUEUED
)
3343 new = simplify_subreg (outermode
, op
, innermode
, byte
);
3347 if (GET_CODE (op
) == SUBREG
|| GET_MODE (op
) == VOIDmode
)
3350 return gen_rtx_SUBREG (outermode
, op
, byte
);
3352 /* Simplify X, an rtx expression.
3354 Return the simplified expression or NULL if no simplifications
3357 This is the preferred entry point into the simplification routines;
3358 however, we still allow passes to call the more specific routines.
3360 Right now GCC has three (yes, three) major bodies of RTL simplification
3361 code that need to be unified.
3363 1. fold_rtx in cse.c. This code uses various CSE specific
3364 information to aid in RTL simplification.
3366 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3367 it uses combine specific information to aid in RTL
3370 3. The routines in this file.
3373 Long term we want to only have one body of simplification code; to
3374 get to that state I recommend the following steps:
3376 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3377 which are not pass dependent state into these routines.
3379 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3380 use this routine whenever possible.
3382 3. Allow for pass dependent state to be provided to these
3383 routines and add simplifications based on the pass dependent
3384 state. Remove code from cse.c & combine.c that becomes
3387 It will take time, but ultimately the compiler will be easier to
3388 maintain and improve. It's totally silly that when we add a
3389 simplification that it needs to be added to 4 places (3 for RTL
3390 simplification and 1 for tree simplification). */
3393 simplify_rtx (rtx x
)
3395 enum rtx_code code
= GET_CODE (x
);
3396 enum machine_mode mode
= GET_MODE (x
);
3399 switch (GET_RTX_CLASS (code
))
3402 return simplify_unary_operation (code
, mode
,
3403 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
3405 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
3406 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
3408 /* Fall through.... */
3411 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
3415 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
3416 XEXP (x
, 0), XEXP (x
, 1),
3420 temp
= simplify_relational_operation (code
,
3421 ((GET_MODE (XEXP (x
, 0))
3423 ? GET_MODE (XEXP (x
, 0))
3424 : GET_MODE (XEXP (x
, 1))),
3425 XEXP (x
, 0), XEXP (x
, 1));
3426 #ifdef FLOAT_STORE_FLAG_VALUE
3427 if (temp
!= 0 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
3429 if (temp
== const0_rtx
)
3430 temp
= CONST0_RTX (mode
);
3432 temp
= CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode
),
3440 return simplify_gen_subreg (mode
, SUBREG_REG (x
),
3441 GET_MODE (SUBREG_REG (x
)),
3443 if (code
== CONSTANT_P_RTX
)
3445 if (CONSTANT_P (XEXP (x
, 0)))
3453 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3454 if (GET_CODE (XEXP (x
, 0)) == HIGH
3455 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))