/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
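/* As an illustration: if LOW is the low half of a negative double-word
   value, its own sign bit is set, so HWI_SIGN_EXTEND (low) evaluates to
   (HOST_WIDE_INT) -1; for a nonnegative LOW it evaluates to 0.  That is
   exactly the high word that sign-extending LOW would produce.  */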
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
        {
#ifdef FLOAT_STORE_FLAG_VALUE
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE val;

              if (tem == const0_rtx)
                return CONST0_RTX (mode);
              if (tem != const_true_rtx)
                abort ();
              val = FLOAT_STORE_FLAG_VALUE (mode);
              return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
            }
#endif
          return tem;
        }
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));
  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case 'o':
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
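/* For instance, simplify_unary_operation (NEG, SImode, GEN_INT (9), SImode)
   folds to (const_int -9), while NEG of a plain register returns zero
   because no compile-time simplification is possible.
   (Illustrative example.)  */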
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        default:
          return 0;
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */
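      /* Concretely (as implemented below): a NaN operand folds to zero,
         a value above the target type's maximum folds to that maximum,
         and a value below its minimum folds to that minimum; e.g.
         (fix:QI) of 1000.0 yields 127 and (unsigned_fix:QI) of -3.0
         yields 0.  (Illustrative note.)  */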
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;

      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          return 0;
        }
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
         || GET_CODE (op) == CONST_DOUBLE;
}
/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
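/* For example, under the rules below (and (and x (const_int 12))
   (const_int 10)) is rewritten as (and x (const_int 8)), since the two
   constants can be combined at compile time.  (Illustrative example.)  */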
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
        return 0;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
                                         XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
        return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
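/* For instance, simplify_binary_operation (PLUS, SImode, GEN_INT (2),
   GEN_INT (3)) folds to (const_int 5), and (minus x x) folds to
   (const_int 0) in integral modes; a sum of two distinct registers is
   returned as zero, meaning no simplification.  (Illustrative example.)  */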
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }
  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
        return 0;

      if (code == DIV
          && REAL_VALUES_EQUAL (f1, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:   case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a  */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */
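          /* For example, (plus (mult x (const_int 3)) x) distributes to
             (mult x (const_int 4)); by contrast (plus (ashift x (const_int 2)) x)
             is left alone, because turning it into (mult x (const_int 5))
             would create a real multiply where none existed before.
             (Illustrative example.)  */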
          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }

            return 0;
          }

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
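  /* For instance, in QImode the operand (const_int -1) yields arg0 == 0xff
     (zero-extended) and arg0s == -1 (sign-extended); the unsigned and signed
     cases below pick whichever form the operation needs.
     (Illustrative example.)  */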
  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      return 0;
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
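/* For example, given (plus (minus a b) c) the routine collects the operand
   list a, b (negated), c, simplifies pairs where possible (constants are
   combined first), and then rebuilds a canonical chain of PLUS/MINUS
   expressions from whatever operands remain.  (Illustrative sketch.)  */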
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;
  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
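/* For instance (illustrative values only): calling this function with GT,
   SImode, (const_int 5) and (const_int 2) folds to const_true_rtx, while
   the same call with EQ folds to const0_rtx.  */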
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem, trueop0, trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored.  */
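  /* Concretely (QImode example, illustrative only): for A = 255 and B = 127,
     (ltu A B) is false, but A - B = 128, which is -128 when read as a signed
     QImode value, so (lt (minus A B) 0) would be true.  This is why the
     rewrite below is restricted to signed comparison codes.  */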
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }
      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
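      /* Worked example (illustrative): for width == 8 and l0s == 0x1f0, the
         mask ((HOST_WIDE_INT) 1 << 8) - 1 == 0xff leaves l0u == 0xf0, and
         since bit 7 of 0xf0 is set, l0s is sign-extended to ...fffff0,
         i.e. -16.  */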
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
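/* For example (hypothetical operands A and B): (if_then_else (const_int 1)
   A B) simplifies to A, and (if_then_else C A A) simplifies to A whenever
   the condition C has no side effects.  */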
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
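      /* Worked example of the extraction above (illustrative, with
         BITS_BIG_ENDIAN clear): (zero_extract (const_int 0x2c) (const_int 4)
         (const_int 2)) shifts 0x2c right by 2 giving 0xb and masks to 4
         bits, so the result is (const_int 11); the sign_extract variant
         would propagate bit 3 and yield (const_int -5).  */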
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const_true_rtx)
            return op1;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
          || GET_MODE (op1) != mode
          || !VECTOR_MODE_P (mode))
        abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
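/* For example (little-endian target, illustrative only):
   simplify_subreg (QImode, (const_int 0x1234), HImode, 0) returns
   (const_int 0x34), the low byte of the HImode constant.  */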
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
        {
          elt = CONST_VECTOR_ELT (op, offset);

          /* ?? We probably don't need this copy_rtx because constants
             can be shared.  ?? */

          return copy_rtx (elt);
        }
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
               && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
        {
          return (gen_rtx_CONST_VECTOR
                  (outermode,
                   gen_rtvec_v (GET_MODE_NUNITS (outermode),
                                &CONST_VECTOR_ELT (op, offset))));
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (GET_MODE_SIZE (outermode) % elt_size == 0))
        {
          /* This happens when the target register size is smaller than
             the vector mode, and we synthesize operations with vectors
             of elements that are smaller than the register size.  */
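          /* Illustrative example: taking an SImode subreg of a V4QImode
             constant vector packs the four QImode elements into a single
             32-bit integer, accumulating them in SUM (and in HIGH when the
             result needs two host words).  */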
          HOST_WIDE_INT sum = 0, high = 0;
          unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
          unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
          unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
          int shift = BITS_PER_UNIT * elt_size;
          unsigned HOST_WIDE_INT unit_mask;

          unit_mask = (unsigned HOST_WIDE_INT) -1
            >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);

          for (; n_elts--; i += step)
            {
              elt = CONST_VECTOR_ELT (op, i);
              if (GET_CODE (elt) == CONST_DOUBLE
                  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
                {
                  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
                                            elt);
                  if (! elt)
                    return NULL_RTX;
                }
              if (GET_CODE (elt) != CONST_INT)
                return NULL_RTX;
              /* Avoid overflow.  */
              if (high >> (HOST_BITS_PER_WIDE_INT - shift))
                return NULL_RTX;
              high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
              sum = (sum << shift) + (INTVAL (elt) & unit_mask);
            }
          if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
            return GEN_INT (trunc_int_for_mode (sum, outermode));
          else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
            return immed_double_const (sum, high, outermode);
          else
            return NULL_RTX;
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (elt_size % GET_MODE_SIZE (outermode) == 0))
        {
          enum machine_mode new_mode
            = int_mode_for_mode (GET_MODE_INNER (innermode));
          int subbyte = byte % elt_size;

          op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
          if (! op)
            return NULL_RTX;
          return simplify_subreg (outermode, op, new_mode, subbyte);
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
        /* This shouldn't happen, but let's not do anything stupid.  */
        return NULL_RTX;
    }
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (VECTOR_MODE_P (outermode))
        {
          /* Construct a CONST_VECTOR from individual subregs.  */
          enum machine_mode submode = GET_MODE_INNER (outermode);
          int subsize = GET_MODE_UNIT_SIZE (outermode);
          int i, elts = GET_MODE_NUNITS (outermode);
          rtvec v = rtvec_alloc (elts);
          rtx elt;

          for (i = 0; i < elts; i++, byte += subsize)
            {
              /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
              /* ??? It would be nice if we could actually make such subregs
                 on targets that allow such relocations.  */
              if (byte >= GET_MODE_SIZE (innermode))
                elt = CONST0_RTX (submode);
              else
                elt = simplify_subreg (submode, op, innermode, byte);
              if (! elt)
                return NULL_RTX;
              RTVEC_ELT (v, i) = elt;
            }

          return gen_rtx_CONST_VECTOR (outermode, v);
        }
      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
          && GET_CODE (op) != CONST_VECTOR)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* Similar comments as above apply here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      if (GET_MODE_CLASS (outermode) != MODE_INT
          && GET_MODE_CLASS (outermode) != MODE_CC)
        {
          enum machine_mode new_mode = int_mode_for_mode (outermode);

          if (new_mode != innermode || byte != 0)
            {
              op = simplify_subreg (new_mode, op, innermode, byte);
              if (! op)
                return NULL_RTX;
              return simplify_subreg (outermode, op, new_mode, 0);
            }
        }
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return NULL_RTX;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = ! part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

          /* FALLTHROUGH */
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing of non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return NULL_RTX;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
            }

          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
          else
            {
              val >>= offset;
              if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
                val = trunc_int_for_mode (val, outermode);
              return GEN_INT (val);
            }

        default:
          break;
        }
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
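  /* For instance (illustrative address ADDR): (subreg:SI (mem:DI ADDR) 4)
     becomes a narrower memory reference at the appropriate byte offset,
     roughly (mem:SI (plus ADDR (const_int 4))), via adjust_address_nv.  */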
  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      temp = simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          if (temp == const0_rtx)
            temp = CONST0_RTX (mode);
          else
            temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
                                                 mode);
        }
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
        {
          if (CONSTANT_P (XEXP (x, 0)))
            return const1_rtx;
        }
      break;

    case 'o':
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))