1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx
neg_const_int (enum machine_mode
, rtx
);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
,
56 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
58 static bool associative_constant_p (rtx
);
59 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
65 neg_const_int (enum machine_mode mode
, rtx i
)
67 return gen_int_mode (- INTVAL (i
), mode
);
71 /* Make a binary operation by properly ordering the operands and
72 seeing if the expression folds. */
75 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
80 /* Put complex operands first and constants second if commutative. */
81 if (GET_RTX_CLASS (code
) == 'c'
82 && swap_commutative_operands_p (op0
, op1
))
83 tem
= op0
, op0
= op1
, op1
= tem
;
85 /* If this simplifies, do it. */
86 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
90 /* Handle addition and subtraction specially. Otherwise, just form
93 if (code
== PLUS
|| code
== MINUS
)
95 tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 1);
100 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
103 /* If X is a MEM referencing the constant pool, return the real value.
104 Otherwise return X. */
106 avoid_constant_pool_reference (rtx x
)
109 enum machine_mode cmode
;
111 switch (GET_CODE (x
))
117 /* Handle float extensions of constant pool references. */
119 c
= avoid_constant_pool_reference (tmp
);
120 if (c
!= tmp
&& GET_CODE (c
) == CONST_DOUBLE
)
124 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
125 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
135 /* Call target hook to avoid the effects of -fpic etc.... */
136 addr
= (*targetm
.delegitimize_address
) (addr
);
138 if (GET_CODE (addr
) == LO_SUM
)
139 addr
= XEXP (addr
, 1);
141 if (GET_CODE (addr
) != SYMBOL_REF
142 || ! CONSTANT_POOL_ADDRESS_P (addr
))
145 c
= get_pool_constant (addr
);
146 cmode
= get_pool_mode (addr
);
148 /* If we're accessing the constant in a different mode than it was
149 originally stored, attempt to fix that up via subreg simplifications.
150 If that fails we have no choice but to return the original memory. */
151 if (cmode
!= GET_MODE (x
))
153 c
= simplify_subreg (GET_MODE (x
), c
, cmode
, 0);
160 /* Make a unary operation by first seeing if it folds and otherwise making
161 the specified operation. */
164 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
165 enum machine_mode op_mode
)
169 /* If this simplifies, use it. */
170 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
173 return gen_rtx_fmt_e (code
, mode
, op
);
176 /* Likewise for ternary operations. */
179 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
180 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
184 /* If this simplifies, use it. */
185 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
189 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
192 /* Return true if X is a MEM referencing the constant pool. */
195 constant_pool_reference_p (rtx x
)
197 return avoid_constant_pool_reference (x
) != x
;
200 /* Likewise, for relational operations.
201 CMP_MODE specifies mode comparison is done in.
205 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
206 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
210 if (cmp_mode
== VOIDmode
)
211 cmp_mode
= GET_MODE (op0
);
212 if (cmp_mode
== VOIDmode
)
213 cmp_mode
= GET_MODE (op1
);
215 if (cmp_mode
!= VOIDmode
216 && ! VECTOR_MODE_P (mode
))
218 tem
= simplify_relational_operation (code
, cmp_mode
, op0
, op1
);
222 #ifdef FLOAT_STORE_FLAG_VALUE
223 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
226 if (tem
== const0_rtx
)
227 return CONST0_RTX (mode
);
228 if (tem
!= const_true_rtx
)
230 val
= FLOAT_STORE_FLAG_VALUE (mode
);
231 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
238 /* For the following tests, ensure const0_rtx is op1. */
239 if (swap_commutative_operands_p (op0
, op1
)
240 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
241 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
243 /* If op0 is a compare, extract the comparison arguments from it. */
244 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
245 return simplify_gen_relational (code
, mode
, VOIDmode
,
246 XEXP (op0
, 0), XEXP (op0
, 1));
248 /* If op0 is a comparison, extract the comparison arguments form it. */
249 if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && op1
== const0_rtx
)
253 if (GET_MODE (op0
) == mode
)
255 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
256 XEXP (op0
, 0), XEXP (op0
, 1));
260 enum rtx_code
new = reversed_comparison_code (op0
, NULL_RTX
);
262 return simplify_gen_relational (new, mode
, VOIDmode
,
263 XEXP (op0
, 0), XEXP (op0
, 1));
267 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
270 /* Replace all occurrences of OLD in X with NEW and try to simplify the
271 resulting RTX. Return a new RTX which is as simplified as possible. */
274 simplify_replace_rtx (rtx x
, rtx old
, rtx
new)
276 enum rtx_code code
= GET_CODE (x
);
277 enum machine_mode mode
= GET_MODE (x
);
278 enum machine_mode op_mode
;
281 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
282 to build a new expression substituting recursively. If we can't do
283 anything, return our input. */
288 switch (GET_RTX_CLASS (code
))
292 op_mode
= GET_MODE (op0
);
293 op0
= simplify_replace_rtx (op0
, old
, new);
294 if (op0
== XEXP (x
, 0))
296 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
300 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
301 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
302 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
304 return simplify_gen_binary (code
, mode
, op0
, op1
);
309 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
310 op0
= simplify_replace_rtx (op0
, old
, new);
311 op1
= simplify_replace_rtx (op1
, old
, new);
312 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
314 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
319 op_mode
= GET_MODE (op0
);
320 op0
= simplify_replace_rtx (op0
, old
, new);
321 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
322 op2
= simplify_replace_rtx (XEXP (x
, 2), old
, new);
323 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
325 if (op_mode
== VOIDmode
)
326 op_mode
= GET_MODE (op0
);
327 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
330 /* The only case we try to handle is a SUBREG. */
333 op0
= simplify_replace_rtx (SUBREG_REG (x
), old
, new);
334 if (op0
== SUBREG_REG (x
))
336 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
337 GET_MODE (SUBREG_REG (x
)),
339 return op0
? op0
: x
;
346 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
347 if (op0
== XEXP (x
, 0))
349 return replace_equiv_address_nv (x
, op0
);
351 else if (code
== LO_SUM
)
353 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
354 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
356 /* (lo_sum (high x) x) -> x */
357 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
360 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
362 return gen_rtx_LO_SUM (mode
, op0
, op1
);
364 else if (code
== REG
)
366 if (REG_P (old
) && REGNO (x
) == REGNO (old
))
377 /* Try to simplify a unary operation CODE whose output mode is to be
378 MODE with input operand OP whose mode was originally OP_MODE.
379 Return zero if no simplification can be made. */
381 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
382 rtx op
, enum machine_mode op_mode
)
384 unsigned int width
= GET_MODE_BITSIZE (mode
);
385 rtx trueop
= avoid_constant_pool_reference (op
);
387 if (code
== VEC_DUPLICATE
)
389 if (!VECTOR_MODE_P (mode
))
391 if (GET_MODE (trueop
) != VOIDmode
392 && !VECTOR_MODE_P (GET_MODE (trueop
))
393 && GET_MODE_INNER (mode
) != GET_MODE (trueop
))
395 if (GET_MODE (trueop
) != VOIDmode
396 && VECTOR_MODE_P (GET_MODE (trueop
))
397 && GET_MODE_INNER (mode
) != GET_MODE_INNER (GET_MODE (trueop
)))
399 if (GET_CODE (trueop
) == CONST_INT
|| GET_CODE (trueop
) == CONST_DOUBLE
400 || GET_CODE (trueop
) == CONST_VECTOR
)
402 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
403 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
404 rtvec v
= rtvec_alloc (n_elts
);
407 if (GET_CODE (trueop
) != CONST_VECTOR
)
408 for (i
= 0; i
< n_elts
; i
++)
409 RTVEC_ELT (v
, i
) = trueop
;
412 enum machine_mode inmode
= GET_MODE (trueop
);
413 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
414 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
416 if (in_n_elts
>= n_elts
|| n_elts
% in_n_elts
)
418 for (i
= 0; i
< n_elts
; i
++)
419 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop
, i
% in_n_elts
);
421 return gen_rtx_CONST_VECTOR (mode
, v
);
424 else if (GET_CODE (op
) == CONST
)
425 return simplify_unary_operation (code
, mode
, XEXP (op
, 0), op_mode
);
427 if (VECTOR_MODE_P (mode
) && GET_CODE (trueop
) == CONST_VECTOR
)
429 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
430 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
431 enum machine_mode opmode
= GET_MODE (trueop
);
432 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
433 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
434 rtvec v
= rtvec_alloc (n_elts
);
437 if (op_n_elts
!= n_elts
)
440 for (i
= 0; i
< n_elts
; i
++)
442 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
443 CONST_VECTOR_ELT (trueop
, i
),
444 GET_MODE_INNER (opmode
));
447 RTVEC_ELT (v
, i
) = x
;
449 return gen_rtx_CONST_VECTOR (mode
, v
);
452 /* The order of these tests is critical so that, for example, we don't
453 check the wrong mode (input vs. output) for a conversion operation,
454 such as FIX. At some point, this should be simplified. */
456 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
457 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
459 HOST_WIDE_INT hv
, lv
;
462 if (GET_CODE (trueop
) == CONST_INT
)
463 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
465 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
467 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
468 d
= real_value_truncate (mode
, d
);
469 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
471 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
472 && (GET_CODE (trueop
) == CONST_DOUBLE
473 || GET_CODE (trueop
) == CONST_INT
))
475 HOST_WIDE_INT hv
, lv
;
478 if (GET_CODE (trueop
) == CONST_INT
)
479 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
481 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
483 if (op_mode
== VOIDmode
)
485 /* We don't know how to interpret negative-looking numbers in
486 this case, so don't try to fold those. */
490 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
493 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
495 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
496 d
= real_value_truncate (mode
, d
);
497 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
500 if (GET_CODE (trueop
) == CONST_INT
501 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
503 HOST_WIDE_INT arg0
= INTVAL (trueop
);
517 val
= (arg0
>= 0 ? arg0
: - arg0
);
521 /* Don't use ffs here. Instead, get low order bit and then its
522 number. If arg0 is zero, this will return 0, as desired. */
523 arg0
&= GET_MODE_MASK (mode
);
524 val
= exact_log2 (arg0
& (- arg0
)) + 1;
528 arg0
&= GET_MODE_MASK (mode
);
529 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
532 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
536 arg0
&= GET_MODE_MASK (mode
);
539 /* Even if the value at zero is undefined, we have to come
540 up with some replacement. Seems good enough. */
541 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
542 val
= GET_MODE_BITSIZE (mode
);
545 val
= exact_log2 (arg0
& -arg0
);
549 arg0
&= GET_MODE_MASK (mode
);
552 val
++, arg0
&= arg0
- 1;
556 arg0
&= GET_MODE_MASK (mode
);
559 val
++, arg0
&= arg0
- 1;
568 /* When zero-extending a CONST_INT, we need to know its
570 if (op_mode
== VOIDmode
)
572 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
574 /* If we were really extending the mode,
575 we would have to distinguish between zero-extension
576 and sign-extension. */
577 if (width
!= GET_MODE_BITSIZE (op_mode
))
581 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
582 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
588 if (op_mode
== VOIDmode
)
590 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
592 /* If we were really extending the mode,
593 we would have to distinguish between zero-extension
594 and sign-extension. */
595 if (width
!= GET_MODE_BITSIZE (op_mode
))
599 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
602 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
604 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
605 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
622 val
= trunc_int_for_mode (val
, mode
);
624 return GEN_INT (val
);
627 /* We can do some operations on integer CONST_DOUBLEs. Also allow
628 for a DImode operation on a CONST_INT. */
629 else if (GET_MODE (trueop
) == VOIDmode
630 && width
<= HOST_BITS_PER_WIDE_INT
* 2
631 && (GET_CODE (trueop
) == CONST_DOUBLE
632 || GET_CODE (trueop
) == CONST_INT
))
634 unsigned HOST_WIDE_INT l1
, lv
;
635 HOST_WIDE_INT h1
, hv
;
637 if (GET_CODE (trueop
) == CONST_DOUBLE
)
638 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
640 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
650 neg_double (l1
, h1
, &lv
, &hv
);
655 neg_double (l1
, h1
, &lv
, &hv
);
667 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
670 lv
= exact_log2 (l1
& -l1
) + 1;
676 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
677 - HOST_BITS_PER_WIDE_INT
;
679 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
680 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
681 lv
= GET_MODE_BITSIZE (mode
);
687 lv
= exact_log2 (l1
& -l1
);
689 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
690 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
691 lv
= GET_MODE_BITSIZE (mode
);
714 /* This is just a change-of-mode, so do nothing. */
719 if (op_mode
== VOIDmode
)
722 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
726 lv
= l1
& GET_MODE_MASK (op_mode
);
730 if (op_mode
== VOIDmode
731 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
735 lv
= l1
& GET_MODE_MASK (op_mode
);
736 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
737 && (lv
& ((HOST_WIDE_INT
) 1
738 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
739 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
741 hv
= HWI_SIGN_EXTEND (lv
);
752 return immed_double_const (lv
, hv
, mode
);
755 else if (GET_CODE (trueop
) == CONST_DOUBLE
756 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
758 REAL_VALUE_TYPE d
, t
;
759 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
764 if (HONOR_SNANS (mode
) && real_isnan (&d
))
766 real_sqrt (&t
, mode
, &d
);
770 d
= REAL_VALUE_ABS (d
);
773 d
= REAL_VALUE_NEGATE (d
);
776 d
= real_value_truncate (mode
, d
);
779 /* All this does is change the mode. */
782 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
789 real_to_target (tmp
, &d
, GET_MODE (trueop
));
790 for (i
= 0; i
< 4; i
++)
792 real_from_target (&d
, tmp
, mode
);
798 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
801 else if (GET_CODE (trueop
) == CONST_DOUBLE
802 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
803 && GET_MODE_CLASS (mode
) == MODE_INT
804 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
806 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
807 operators are intentionally left unspecified (to ease implementation
808 by target backends), for consistency, this routine implements the
809 same semantics for constant folding as used by the middle-end. */
811 HOST_WIDE_INT xh
, xl
, th
, tl
;
812 REAL_VALUE_TYPE x
, t
;
813 REAL_VALUE_FROM_CONST_DOUBLE (x
, trueop
);
817 if (REAL_VALUE_ISNAN (x
))
820 /* Test against the signed upper bound. */
821 if (width
> HOST_BITS_PER_WIDE_INT
)
823 th
= ((unsigned HOST_WIDE_INT
) 1
824 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
830 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
832 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
833 if (REAL_VALUES_LESS (t
, x
))
840 /* Test against the signed lower bound. */
841 if (width
> HOST_BITS_PER_WIDE_INT
)
843 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
849 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
851 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
852 if (REAL_VALUES_LESS (x
, t
))
858 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
862 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
865 /* Test against the unsigned upper bound. */
866 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
871 else if (width
>= HOST_BITS_PER_WIDE_INT
)
873 th
= ((unsigned HOST_WIDE_INT
) 1
874 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
880 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
882 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
883 if (REAL_VALUES_LESS (t
, x
))
890 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
896 return immed_double_const (xl
, xh
, mode
);
899 /* This was formerly used only for non-IEEE float.
900 eggert@twinsun.com says it is safe for IEEE also. */
903 enum rtx_code reversed
;
906 /* There are some simplifications we can do even if the operands
911 /* (not (not X)) == X. */
912 if (GET_CODE (op
) == NOT
)
915 /* (not (eq X Y)) == (ne X Y), etc. */
916 if (GET_RTX_CLASS (GET_CODE (op
)) == '<'
917 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
918 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
920 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
921 XEXP (op
, 0), XEXP (op
, 1));
923 /* (not (plus X -1)) can become (neg X). */
924 if (GET_CODE (op
) == PLUS
925 && XEXP (op
, 1) == constm1_rtx
)
926 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
928 /* Similarly, (not (neg X)) is (plus X -1). */
929 if (GET_CODE (op
) == NEG
)
930 return plus_constant (XEXP (op
, 0), -1);
932 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
933 if (GET_CODE (op
) == XOR
934 && GET_CODE (XEXP (op
, 1)) == CONST_INT
935 && (temp
= simplify_unary_operation (NOT
, mode
,
938 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
941 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
942 operands other than 1, but that is not valid. We could do a
943 similar simplification for (not (lshiftrt C X)) where C is
944 just the sign bit, but this doesn't seem common enough to
946 if (GET_CODE (op
) == ASHIFT
947 && XEXP (op
, 0) == const1_rtx
)
949 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
950 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
953 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
954 by reversing the comparison code if valid. */
955 if (STORE_FLAG_VALUE
== -1
956 && GET_RTX_CLASS (GET_CODE (op
)) == '<'
957 && (reversed
= reversed_comparison_code (op
, NULL_RTX
))
959 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
960 XEXP (op
, 0), XEXP (op
, 1));
962 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
963 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
964 so we can perform the above simplification. */
966 if (STORE_FLAG_VALUE
== -1
967 && GET_CODE (op
) == ASHIFTRT
968 && GET_CODE (XEXP (op
, 1)) == CONST_INT
969 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
970 return simplify_gen_relational (GE
, mode
, VOIDmode
,
971 XEXP (op
, 0), const0_rtx
);
976 /* (neg (neg X)) == X. */
977 if (GET_CODE (op
) == NEG
)
980 /* (neg (plus X 1)) can become (not X). */
981 if (GET_CODE (op
) == PLUS
982 && XEXP (op
, 1) == const1_rtx
)
983 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
985 /* Similarly, (neg (not X)) is (plus X 1). */
986 if (GET_CODE (op
) == NOT
)
987 return plus_constant (XEXP (op
, 0), 1);
989 /* (neg (minus X Y)) can become (minus Y X). This transformation
990 isn't safe for modes with signed zeros, since if X and Y are
991 both +0, (minus Y X) is the same as (minus X Y). If the
992 rounding mode is towards +infinity (or -infinity) then the two
993 expressions will be rounded differently. */
994 if (GET_CODE (op
) == MINUS
995 && !HONOR_SIGNED_ZEROS (mode
)
996 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
997 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1),
1000 if (GET_CODE (op
) == PLUS
1001 && !HONOR_SIGNED_ZEROS (mode
)
1002 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1004 /* (neg (plus A C)) is simplified to (minus -C A). */
1005 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
1006 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
1008 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1),
1011 return simplify_gen_binary (MINUS
, mode
, temp
,
1015 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1016 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1017 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1020 /* (neg (mult A B)) becomes (mult (neg A) B).
1021 This works even for floating-point values. */
1022 if (GET_CODE (op
) == MULT
1023 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1025 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1026 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
1029 /* NEG commutes with ASHIFT since it is multiplication. Only do
1030 this if we can then eliminate the NEG (e.g., if the operand
1032 if (GET_CODE (op
) == ASHIFT
)
1034 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0),
1037 return simplify_gen_binary (ASHIFT
, mode
, temp
,
1044 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1045 becomes just the MINUS if its mode is MODE. This allows
1046 folding switch statements on machines using casesi (such as
1048 if (GET_CODE (op
) == TRUNCATE
1049 && GET_MODE (XEXP (op
, 0)) == mode
1050 && GET_CODE (XEXP (op
, 0)) == MINUS
1051 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1052 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1053 return XEXP (op
, 0);
1055 /* Check for a sign extension of a subreg of a promoted
1056 variable, where the promotion is sign-extended, and the
1057 target mode is the same as the variable's promotion. */
1058 if (GET_CODE (op
) == SUBREG
1059 && SUBREG_PROMOTED_VAR_P (op
)
1060 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1061 && GET_MODE (XEXP (op
, 0)) == mode
)
1062 return XEXP (op
, 0);
1064 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1065 if (! POINTERS_EXTEND_UNSIGNED
1066 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1068 || (GET_CODE (op
) == SUBREG
1069 && GET_CODE (SUBREG_REG (op
)) == REG
1070 && REG_POINTER (SUBREG_REG (op
))
1071 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1072 return convert_memory_address (Pmode
, op
);
1077 /* Check for a zero extension of a subreg of a promoted
1078 variable, where the promotion is zero-extended, and the
1079 target mode is the same as the variable's promotion. */
1080 if (GET_CODE (op
) == SUBREG
1081 && SUBREG_PROMOTED_VAR_P (op
)
1082 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1083 && GET_MODE (XEXP (op
, 0)) == mode
)
1084 return XEXP (op
, 0);
1086 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1087 if (POINTERS_EXTEND_UNSIGNED
> 0
1088 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1090 || (GET_CODE (op
) == SUBREG
1091 && GET_CODE (SUBREG_REG (op
)) == REG
1092 && REG_POINTER (SUBREG_REG (op
))
1093 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1094 return convert_memory_address (Pmode
, op
);
1106 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1107 is a suitable integer or floating point immediate constant. */
1109 associative_constant_p (rtx op
)
1111 if (GET_CODE (op
) == CONST_INT
1112 || GET_CODE (op
) == CONST_DOUBLE
)
1114 op
= avoid_constant_pool_reference (op
);
1115 return GET_CODE (op
) == CONST_INT
1116 || GET_CODE (op
) == CONST_DOUBLE
;
1119 /* Subroutine of simplify_binary_operation to simplify an associative
1120 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1121 Return 0 if no simplification is possible. */
1123 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1128 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1129 if (GET_CODE (op0
) == code
1130 && associative_constant_p (op1
)
1131 && associative_constant_p (XEXP (op0
, 1)))
1133 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1136 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1139 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1140 if (GET_CODE (op0
) == code
1141 && GET_CODE (op1
) == code
1142 && associative_constant_p (XEXP (op0
, 1))
1143 && associative_constant_p (XEXP (op1
, 1)))
1145 rtx c
= simplify_binary_operation (code
, mode
,
1146 XEXP (op0
, 1), XEXP (op1
, 1));
1149 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1150 return simplify_gen_binary (code
, mode
, tem
, c
);
1153 /* Canonicalize (x op c) op y as (x op y) op c. */
1154 if (GET_CODE (op0
) == code
1155 && associative_constant_p (XEXP (op0
, 1)))
1157 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1158 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1161 /* Canonicalize x op (y op c) as (x op y) op c. */
1162 if (GET_CODE (op1
) == code
1163 && associative_constant_p (XEXP (op1
, 1)))
1165 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1166 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1172 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1173 and OP1. Return 0 if no simplification is possible.
1175 Don't use this for relational operations such as EQ or LT.
1176 Use simplify_relational_operation instead. */
1178 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1181 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
1183 unsigned int width
= GET_MODE_BITSIZE (mode
);
1185 rtx trueop0
= avoid_constant_pool_reference (op0
);
1186 rtx trueop1
= avoid_constant_pool_reference (op1
);
1188 /* Relational operations don't work here. We must know the mode
1189 of the operands in order to do the comparison correctly.
1190 Assuming a full word can give incorrect results.
1191 Consider comparing 128 with -128 in QImode. */
1193 if (GET_RTX_CLASS (code
) == '<')
1196 /* Make sure the constant is second. */
1197 if (GET_RTX_CLASS (code
) == 'c'
1198 && swap_commutative_operands_p (trueop0
, trueop1
))
1200 tem
= op0
, op0
= op1
, op1
= tem
;
1201 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
1204 if (VECTOR_MODE_P (mode
)
1205 && code
!= VEC_CONCAT
1206 && GET_CODE (trueop0
) == CONST_VECTOR
1207 && GET_CODE (trueop1
) == CONST_VECTOR
)
1209 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1210 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1211 enum machine_mode op0mode
= GET_MODE (trueop0
);
1212 int op0_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op0mode
));
1213 unsigned op0_n_elts
= (GET_MODE_SIZE (op0mode
) / op0_elt_size
);
1214 enum machine_mode op1mode
= GET_MODE (trueop1
);
1215 int op1_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op1mode
));
1216 unsigned op1_n_elts
= (GET_MODE_SIZE (op1mode
) / op1_elt_size
);
1217 rtvec v
= rtvec_alloc (n_elts
);
1220 if (op0_n_elts
!= n_elts
|| op1_n_elts
!= n_elts
)
1223 for (i
= 0; i
< n_elts
; i
++)
1225 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
1226 CONST_VECTOR_ELT (trueop0
, i
),
1227 CONST_VECTOR_ELT (trueop1
, i
));
1230 RTVEC_ELT (v
, i
) = x
;
1233 return gen_rtx_CONST_VECTOR (mode
, v
);
1236 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1237 && GET_CODE (trueop0
) == CONST_DOUBLE
1238 && GET_CODE (trueop1
) == CONST_DOUBLE
1239 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
1250 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
1252 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
1254 for (i
= 0; i
< 4; i
++)
1258 else if (code
== IOR
)
1260 else if (code
== XOR
)
1265 real_from_target (&r
, tmp0
, mode
);
1266 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
1270 REAL_VALUE_TYPE f0
, f1
, value
;
1272 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
1273 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
1274 f0
= real_value_truncate (mode
, f0
);
1275 f1
= real_value_truncate (mode
, f1
);
1277 if (HONOR_SNANS (mode
)
1278 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
1282 && REAL_VALUES_EQUAL (f1
, dconst0
)
1283 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
1286 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
1287 && flag_trapping_math
1288 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
1290 int s0
= REAL_VALUE_NEGATIVE (f0
);
1291 int s1
= REAL_VALUE_NEGATIVE (f1
);
1296 /* Inf + -Inf = NaN plus exception. */
1301 /* Inf - Inf = NaN plus exception. */
1306 /* Inf / Inf = NaN plus exception. */
1313 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
1314 && flag_trapping_math
1315 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
1316 || (REAL_VALUE_ISINF (f1
)
1317 && REAL_VALUES_EQUAL (f0
, dconst0
))))
1318 /* Inf * 0 = NaN plus exception. */
1321 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
1323 value
= real_value_truncate (mode
, value
);
1324 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
1328 /* We can fold some multi-word operations. */
1329 if (GET_MODE_CLASS (mode
) == MODE_INT
1330 && width
== HOST_BITS_PER_WIDE_INT
* 2
1331 && (GET_CODE (trueop0
) == CONST_DOUBLE
1332 || GET_CODE (trueop0
) == CONST_INT
)
1333 && (GET_CODE (trueop1
) == CONST_DOUBLE
1334 || GET_CODE (trueop1
) == CONST_INT
))
1336 unsigned HOST_WIDE_INT l1
, l2
, lv
;
1337 HOST_WIDE_INT h1
, h2
, hv
;
1339 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
1340 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
1342 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
1344 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
1345 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
1347 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
1352 /* A - B == A + (-B). */
1353 neg_double (l2
, h2
, &lv
, &hv
);
1356 /* Fall through.... */
1359 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1363 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1366 case DIV
: case MOD
: case UDIV
: case UMOD
:
1367 /* We'd need to include tree.h to do this and it doesn't seem worth
1372 lv
= l1
& l2
, hv
= h1
& h2
;
1376 lv
= l1
| l2
, hv
= h1
| h2
;
1380 lv
= l1
^ l2
, hv
= h1
^ h2
;
1386 && ((unsigned HOST_WIDE_INT
) l1
1387 < (unsigned HOST_WIDE_INT
) l2
)))
1396 && ((unsigned HOST_WIDE_INT
) l1
1397 > (unsigned HOST_WIDE_INT
) l2
)))
1404 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
1406 && ((unsigned HOST_WIDE_INT
) l1
1407 < (unsigned HOST_WIDE_INT
) l2
)))
1414 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
1416 && ((unsigned HOST_WIDE_INT
) l1
1417 > (unsigned HOST_WIDE_INT
) l2
)))
1423 case LSHIFTRT
: case ASHIFTRT
:
1425 case ROTATE
: case ROTATERT
:
1426 #ifdef SHIFT_COUNT_TRUNCATED
1427 if (SHIFT_COUNT_TRUNCATED
)
1428 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
1431 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
1434 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
1435 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
1437 else if (code
== ASHIFT
)
1438 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
1439 else if (code
== ROTATE
)
1440 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1441 else /* code == ROTATERT */
1442 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1449 return immed_double_const (lv
, hv
, mode
);
1452 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
1453 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
1455 /* Even if we can't compute a constant result,
1456 there are some cases worth simplifying. */
1461 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1462 when x is NaN, infinite, or finite and nonzero. They aren't
1463 when x is -0 and the rounding mode is not towards -infinity,
1464 since (-0) + 0 is then 0. */
1465 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1468 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1469 transformations are safe even for IEEE. */
1470 if (GET_CODE (op0
) == NEG
)
1471 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1472 else if (GET_CODE (op1
) == NEG
)
1473 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1475 /* (~a) + 1 -> -a */
1476 if (INTEGRAL_MODE_P (mode
)
1477 && GET_CODE (op0
) == NOT
1478 && trueop1
== const1_rtx
)
1479 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1481 /* Handle both-operands-constant cases. We can only add
1482 CONST_INTs to constants since the sum of relocatable symbols
1483 can't be handled by most assemblers. Don't add CONST_INT
1484 to CONST_INT since overflow won't be computed properly if wider
1485 than HOST_BITS_PER_WIDE_INT. */
1487 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1488 && GET_CODE (op1
) == CONST_INT
)
1489 return plus_constant (op0
, INTVAL (op1
));
1490 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1491 && GET_CODE (op0
) == CONST_INT
)
1492 return plus_constant (op1
, INTVAL (op0
));
1494 /* See if this is something like X * C - X or vice versa or
1495 if the multiplication is written as a shift. If so, we can
1496 distribute and make a new multiply, shift, or maybe just
1497 have X (if C is 2 in the example above). But don't make
1498 real multiply if we didn't have one before. */
1500 if (! FLOAT_MODE_P (mode
))
1502 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1503 rtx lhs
= op0
, rhs
= op1
;
1506 if (GET_CODE (lhs
) == NEG
)
1507 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1508 else if (GET_CODE (lhs
) == MULT
1509 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1511 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1514 else if (GET_CODE (lhs
) == ASHIFT
1515 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1516 && INTVAL (XEXP (lhs
, 1)) >= 0
1517 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1519 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1520 lhs
= XEXP (lhs
, 0);
1523 if (GET_CODE (rhs
) == NEG
)
1524 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1525 else if (GET_CODE (rhs
) == MULT
1526 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1528 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1531 else if (GET_CODE (rhs
) == ASHIFT
1532 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1533 && INTVAL (XEXP (rhs
, 1)) >= 0
1534 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1536 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1537 rhs
= XEXP (rhs
, 0);
1540 if (rtx_equal_p (lhs
, rhs
))
1542 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1543 GEN_INT (coeff0
+ coeff1
));
1544 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1548 /* If one of the operands is a PLUS or a MINUS, see if we can
1549 simplify this by the associative law.
1550 Don't use the associative law for floating point.
1551 The inaccuracy makes it nonassociative,
1552 and subtle programs can break if operations are associated. */
1554 if (INTEGRAL_MODE_P (mode
)
1555 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1556 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1557 || (GET_CODE (op0
) == CONST
1558 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1559 || (GET_CODE (op1
) == CONST
1560 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1561 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1564 /* Reassociate floating point addition only when the user
1565 specifies unsafe math optimizations. */
1566 if (FLOAT_MODE_P (mode
)
1567 && flag_unsafe_math_optimizations
)
1569 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1577 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1578 using cc0, in which case we want to leave it as a COMPARE
1579 so we can distinguish it from a register-register-copy.
1581 In IEEE floating point, x-0 is not the same as x. */
1583 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1584 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1585 && trueop1
== CONST0_RTX (mode
))
1589 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1590 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1591 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1592 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1594 rtx xop00
= XEXP (op0
, 0);
1595 rtx xop10
= XEXP (op1
, 0);
1598 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1600 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1601 && GET_MODE (xop00
) == GET_MODE (xop10
)
1602 && REGNO (xop00
) == REGNO (xop10
)
1603 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1604 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1611 /* We can't assume x-x is 0 even with non-IEEE floating point,
1612 but since it is zero except in very strange circumstances, we
1613 will treat it as zero with -funsafe-math-optimizations. */
1614 if (rtx_equal_p (trueop0
, trueop1
)
1615 && ! side_effects_p (op0
)
1616 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1617 return CONST0_RTX (mode
);
1619 /* Change subtraction from zero into negation. (0 - x) is the
1620 same as -x when x is NaN, infinite, or finite and nonzero.
1621 But if the mode has signed zeros, and does not round towards
1622 -infinity, then 0 - 0 is 0, not -0. */
1623 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1624 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1626 /* (-1 - a) is ~a. */
1627 if (trueop0
== constm1_rtx
)
1628 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1630 /* Subtracting 0 has no effect unless the mode has signed zeros
1631 and supports rounding towards -infinity. In such a case,
1633 if (!(HONOR_SIGNED_ZEROS (mode
)
1634 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1635 && trueop1
== CONST0_RTX (mode
))
1638 /* See if this is something like X * C - X or vice versa or
1639 if the multiplication is written as a shift. If so, we can
1640 distribute and make a new multiply, shift, or maybe just
1641 have X (if C is 2 in the example above). But don't make
1642 real multiply if we didn't have one before. */
1644 if (! FLOAT_MODE_P (mode
))
1646 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1647 rtx lhs
= op0
, rhs
= op1
;
1650 if (GET_CODE (lhs
) == NEG
)
1651 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1652 else if (GET_CODE (lhs
) == MULT
1653 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1655 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1658 else if (GET_CODE (lhs
) == ASHIFT
1659 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1660 && INTVAL (XEXP (lhs
, 1)) >= 0
1661 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1663 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1664 lhs
= XEXP (lhs
, 0);
1667 if (GET_CODE (rhs
) == NEG
)
1668 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1669 else if (GET_CODE (rhs
) == MULT
1670 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1672 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1675 else if (GET_CODE (rhs
) == ASHIFT
1676 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1677 && INTVAL (XEXP (rhs
, 1)) >= 0
1678 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1680 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1681 rhs
= XEXP (rhs
, 0);
1684 if (rtx_equal_p (lhs
, rhs
))
1686 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1687 GEN_INT (coeff0
- coeff1
));
1688 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1692 /* (a - (-b)) -> (a + b). True even for IEEE. */
1693 if (GET_CODE (op1
) == NEG
)
1694 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1696 /* (-x - c) may be simplified as (-c - x). */
1697 if (GET_CODE (op0
) == NEG
1698 && (GET_CODE (op1
) == CONST_INT
1699 || GET_CODE (op1
) == CONST_DOUBLE
))
1701 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1703 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1706 /* If one of the operands is a PLUS or a MINUS, see if we can
1707 simplify this by the associative law.
1708 Don't use the associative law for floating point.
1709 The inaccuracy makes it nonassociative,
1710 and subtle programs can break if operations are associated. */
1712 if (INTEGRAL_MODE_P (mode
)
1713 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1714 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1715 || (GET_CODE (op0
) == CONST
1716 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1717 || (GET_CODE (op1
) == CONST
1718 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1719 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1722 /* Don't let a relocatable value get a negative coeff. */
1723 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1724 return simplify_gen_binary (PLUS
, mode
,
1726 neg_const_int (mode
, op1
));
1728 /* (x - (x & y)) -> (x & ~y) */
1729 if (GET_CODE (op1
) == AND
)
1731 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1733 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1734 GET_MODE (XEXP (op1
, 1)));
1735 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1737 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1739 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1740 GET_MODE (XEXP (op1
, 0)));
1741 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1747 if (trueop1
== constm1_rtx
)
1748 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1750 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1751 x is NaN, since x * 0 is then also NaN. Nor is it valid
1752 when the mode has signed zeros, since multiplying a negative
1753 number by 0 will give -0, not 0. */
1754 if (!HONOR_NANS (mode
)
1755 && !HONOR_SIGNED_ZEROS (mode
)
1756 && trueop1
== CONST0_RTX (mode
)
1757 && ! side_effects_p (op0
))
1760 /* In IEEE floating point, x*1 is not equivalent to x for
1762 if (!HONOR_SNANS (mode
)
1763 && trueop1
== CONST1_RTX (mode
))
1766 /* Convert multiply by constant power of two into shift unless
1767 we are still generating RTL. This test is a kludge. */
1768 if (GET_CODE (trueop1
) == CONST_INT
1769 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1770 /* If the mode is larger than the host word size, and the
1771 uppermost bit is set, then this isn't a power of two due
1772 to implicit sign extension. */
1773 && (width
<= HOST_BITS_PER_WIDE_INT
1774 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1775 && ! rtx_equal_function_value_matters
)
1776 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1778 /* x*2 is x+x and x*(-1) is -x */
1779 if (GET_CODE (trueop1
) == CONST_DOUBLE
1780 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1781 && GET_MODE (op0
) == mode
)
1784 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1786 if (REAL_VALUES_EQUAL (d
, dconst2
))
1787 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1789 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1790 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1793 /* Reassociate multiplication, but for floating point MULTs
1794 only when the user specifies unsafe math optimizations. */
1795 if (! FLOAT_MODE_P (mode
)
1796 || flag_unsafe_math_optimizations
)
1798 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1805 if (trueop1
== const0_rtx
)
1807 if (GET_CODE (trueop1
) == CONST_INT
1808 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1809 == GET_MODE_MASK (mode
)))
1811 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1813 /* A | (~A) -> -1 */
1814 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1815 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1816 && ! side_effects_p (op0
)
1817 && GET_MODE_CLASS (mode
) != MODE_CC
)
1819 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1825 if (trueop1
== const0_rtx
)
1827 if (GET_CODE (trueop1
) == CONST_INT
1828 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1829 == GET_MODE_MASK (mode
)))
1830 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1831 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1832 && GET_MODE_CLASS (mode
) != MODE_CC
)
1834 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1840 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1842 if (GET_CODE (trueop1
) == CONST_INT
1843 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1844 == GET_MODE_MASK (mode
)))
1846 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1847 && GET_MODE_CLASS (mode
) != MODE_CC
)
1850 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1851 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1852 && ! side_effects_p (op0
)
1853 && GET_MODE_CLASS (mode
) != MODE_CC
)
1855 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1861 /* Convert divide by power of two into shift (divide by 1 handled
1863 if (GET_CODE (trueop1
) == CONST_INT
1864 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1865 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (arg1
));
1867 /* Fall through.... */
1870 if (trueop1
== CONST1_RTX (mode
))
1872 /* On some platforms DIV uses narrower mode than its
1874 rtx x
= gen_lowpart_common (mode
, op0
);
1877 else if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1878 return gen_lowpart_SUBREG (mode
, op0
);
1883 /* Maybe change 0 / x to 0. This transformation isn't safe for
1884 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1885 Nor is it safe for modes with signed zeros, since dividing
1886 0 by a negative number gives -0, not 0. */
1887 if (!HONOR_NANS (mode
)
1888 && !HONOR_SIGNED_ZEROS (mode
)
1889 && trueop0
== CONST0_RTX (mode
)
1890 && ! side_effects_p (op1
))
1893 /* Change division by a constant into multiplication. Only do
1894 this with -funsafe-math-optimizations. */
1895 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1896 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1897 && trueop1
!= CONST0_RTX (mode
)
1898 && flag_unsafe_math_optimizations
)
1901 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1903 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1905 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1906 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1907 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
1913 /* Handle modulus by power of two (mod with 1 handled below). */
1914 if (GET_CODE (trueop1
) == CONST_INT
1915 && exact_log2 (INTVAL (trueop1
)) > 0)
1916 return simplify_gen_binary (AND
, mode
, op0
,
1917 GEN_INT (INTVAL (op1
) - 1));
1919 /* Fall through.... */
1922 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1923 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1930 /* Rotating ~0 always results in ~0. */
1931 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1932 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1933 && ! side_effects_p (op1
))
1936 /* Fall through.... */
1940 if (trueop1
== const0_rtx
)
1942 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1947 if (width
<= HOST_BITS_PER_WIDE_INT
1948 && GET_CODE (trueop1
) == CONST_INT
1949 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1950 && ! side_effects_p (op0
))
1952 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1954 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1960 if (width
<= HOST_BITS_PER_WIDE_INT
1961 && GET_CODE (trueop1
) == CONST_INT
1962 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1963 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1964 && ! side_effects_p (op0
))
1966 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1968 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1974 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1976 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1978 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1984 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1986 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1988 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1997 /* ??? There are simplifications that can be done. */
2001 if (!VECTOR_MODE_P (mode
))
2003 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
2005 != GET_MODE_INNER (GET_MODE (trueop0
)))
2006 || GET_CODE (trueop1
) != PARALLEL
2007 || XVECLEN (trueop1
, 0) != 1
2008 || GET_CODE (XVECEXP (trueop1
, 0, 0)) != CONST_INT
)
2011 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2012 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP (trueop1
, 0, 0)));
2016 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
2017 || (GET_MODE_INNER (mode
)
2018 != GET_MODE_INNER (GET_MODE (trueop0
)))
2019 || GET_CODE (trueop1
) != PARALLEL
)
2022 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2024 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2025 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2026 rtvec v
= rtvec_alloc (n_elts
);
2029 if (XVECLEN (trueop1
, 0) != (int) n_elts
)
2031 for (i
= 0; i
< n_elts
; i
++)
2033 rtx x
= XVECEXP (trueop1
, 0, i
);
2035 if (GET_CODE (x
) != CONST_INT
)
2037 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, INTVAL (x
));
2040 return gen_rtx_CONST_VECTOR (mode
, v
);
2046 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2047 ? GET_MODE (trueop0
)
2048 : GET_MODE_INNER (mode
));
2049 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2050 ? GET_MODE (trueop1
)
2051 : GET_MODE_INNER (mode
));
2053 if (!VECTOR_MODE_P (mode
)
2054 || (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2055 != GET_MODE_SIZE (mode
)))
2058 if ((VECTOR_MODE_P (op0_mode
)
2059 && (GET_MODE_INNER (mode
)
2060 != GET_MODE_INNER (op0_mode
)))
2061 || (!VECTOR_MODE_P (op0_mode
)
2062 && GET_MODE_INNER (mode
) != op0_mode
))
2065 if ((VECTOR_MODE_P (op1_mode
)
2066 && (GET_MODE_INNER (mode
)
2067 != GET_MODE_INNER (op1_mode
)))
2068 || (!VECTOR_MODE_P (op1_mode
)
2069 && GET_MODE_INNER (mode
) != op1_mode
))
2072 if ((GET_CODE (trueop0
) == CONST_VECTOR
2073 || GET_CODE (trueop0
) == CONST_INT
2074 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2075 && (GET_CODE (trueop1
) == CONST_VECTOR
2076 || GET_CODE (trueop1
) == CONST_INT
2077 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2079 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2080 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2081 rtvec v
= rtvec_alloc (n_elts
);
2083 unsigned in_n_elts
= 1;
2085 if (VECTOR_MODE_P (op0_mode
))
2086 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2087 for (i
= 0; i
< n_elts
; i
++)
2091 if (!VECTOR_MODE_P (op0_mode
))
2092 RTVEC_ELT (v
, i
) = trueop0
;
2094 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2098 if (!VECTOR_MODE_P (op1_mode
))
2099 RTVEC_ELT (v
, i
) = trueop1
;
2101 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2106 return gen_rtx_CONST_VECTOR (mode
, v
);
2118 /* Get the integer argument values in two forms:
2119 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2121 arg0
= INTVAL (trueop0
);
2122 arg1
= INTVAL (trueop1
);
2124 if (width
< HOST_BITS_PER_WIDE_INT
)
2126 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2127 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2130 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2131 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2134 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2135 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2143 /* Compute the value of the arithmetic. */
2148 val
= arg0s
+ arg1s
;
2152 val
= arg0s
- arg1s
;
2156 val
= arg0s
* arg1s
;
2161 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2164 val
= arg0s
/ arg1s
;
2169 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2172 val
= arg0s
% arg1s
;
2177 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2180 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
2185 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2188 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
2204 /* If shift count is undefined, don't fold it; let the machine do
2205 what it wants. But truncate it if the machine will do that. */
2209 #ifdef SHIFT_COUNT_TRUNCATED
2210 if (SHIFT_COUNT_TRUNCATED
)
2214 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
2221 #ifdef SHIFT_COUNT_TRUNCATED
2222 if (SHIFT_COUNT_TRUNCATED
)
2226 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
2233 #ifdef SHIFT_COUNT_TRUNCATED
2234 if (SHIFT_COUNT_TRUNCATED
)
2238 val
= arg0s
>> arg1
;
2240 /* Bootstrap compiler may not have sign extended the right shift.
2241 Manually extend the sign to insure bootstrap cc matches gcc. */
2242 if (arg0s
< 0 && arg1
> 0)
2243 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
2252 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
2253 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
2261 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
2262 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
2266 /* Do nothing here. */
2270 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
2274 val
= ((unsigned HOST_WIDE_INT
) arg0
2275 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2279 val
= arg0s
> arg1s
? arg0s
: arg1s
;
2283 val
= ((unsigned HOST_WIDE_INT
) arg0
2284 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2291 /* ??? There are simplifications that can be done. */
2298 val
= trunc_int_for_mode (val
, mode
);
2300 return GEN_INT (val
);
2303 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2306 Rather than test for specific case, we do this by a brute-force method
2307 and do all possible simplifications until no more changes occur. Then
2308 we rebuild the operation.
2310 If FORCE is true, then always generate the rtx. This is used to
2311 canonicalize stuff emitted from simplify_gen_binary. Note that this
2312 can still fail if the rtx is too complex. It won't fail just because
2313 the result is not 'simpler' than the input, however. */
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;
	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem, trueop0, trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
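
/* Illustrative sketch (not part of the original GCC sources): the
   width-limited comparison above first masks the low word to WIDTH bits for
   the unsigned view and then replicates the sign bit for the signed view.
   On a plain 64-bit integer, with names invented here and assuming
   0 < width < 64, the two views look like this.  */
#if 0
#include <stdint.h>

static uint64_t
zero_extend_to_64 (uint64_t x, unsigned width)
{
  return x & ((UINT64_C (1) << width) - 1);
}

static int64_t
sign_extend_to_64 (uint64_t x, unsigned width)
{
  uint64_t u = x & ((UINT64_C (1) << width) - 1);

  /* If the top bit of the WIDTH-bit field is set, fill the bits above it.  */
  if (u & (UINT64_C (1) << (width - 1)))
    u |= ~((UINT64_C (1) << width) - 1);
  return (int64_t) u;
}
#endif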
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const_true_rtx)
	    return op1;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
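
/* Illustrative sketch (not part of the original GCC sources): the
   SIGN_EXTRACT/ZERO_EXTRACT folding above is ordinary bit-field extraction.
   Stand-alone, with the field described by a (start, len) pair counted from
   the least significant bit, 0 < len < 64 and start + len <= 64, it could be
   written as follows; all names here are invented for illustration.  */
#if 0
#include <stdint.h>

static int64_t
extract_field (uint64_t word, unsigned start, unsigned len, int is_signed)
{
  /* Shift the field down and zero-extend it.  */
  uint64_t field = (word >> start) & ((UINT64_C (1) << len) - 1);

  /* For a signed extract, replicate the field's top bit upwards.  */
  if (is_signed && (field & (UINT64_C (1) << (len - 1))))
    field |= ~((UINT64_C (1) << len) - 1);

  return (int64_t) field;
}
#endif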
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
		abort ();

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      if (bitsize > elem_bitsize)
		abort ();
	      if (bitsize % value_bit != 0)
		abort ();

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    abort ();
	  break;

	default:
	  abort ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  if (elem_bitsize % value_bit != 0)
    abort ();
  if (elem_bitsize + value_start * value_bit > max_bitsize)
    abort ();

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  abort ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
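
/* Illustrative sketch (not part of the original GCC sources): stripped of the
   endianness and vector bookkeeping, the unpack/repack above is plain byte
   serialization.  For one 64-bit integer and a little-endian byte array it
   reduces to the following; the names are invented for illustration.  */
#if 0
#include <stdint.h>

static void
unpack_le (uint64_t x, unsigned char bytes[8])
{
  int i;
  for (i = 0; i < 8; i++)
    bytes[i] = (unsigned char) (x >> (8 * i));
}

static uint64_t
repack_le (const unsigned char bytes[8])
{
  uint64_t x = 0;
  int i;
  for (i = 0; i < 8; i++)
    x |= (uint64_t) bytes[i] << (8 * i);
  return x;
}
#endif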
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
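
/* Illustrative sketch (not part of the original GCC sources): the endianness
   bookkeeping above ultimately decides where the low part of a wide value
   lives.  Ignoring the separate WORDS_BIG_ENDIAN / BYTES_BIG_ENDIAN split and
   sub-word units, the byte offset of a narrow low part inside a wider value
   could be computed as below; the names are invented for illustration.  */
#if 0
static unsigned
lowpart_byte_offset (unsigned inner_bytes, unsigned outer_bytes,
		     int big_endian)
{
  /* On little-endian targets the low part starts at offset 0; on
     big-endian targets it sits at the high end of the inner value.  */
  return big_endian ? inner_bytes - outer_bytes : 0;
}
#endif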
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx new;
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      if (VECTOR_MODE_P (mode))
	return NULL_RTX;
      temp = simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (temp == const0_rtx)
	    temp = CONST0_RTX (mode);
	  else
	    temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						 mode);
	}
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))
	    return const1_rtx;
	}
      break;

    case 'o':
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))