/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */

#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
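/* Illustration (an added sketch, not part of the original sources):
   HWI_SIGN_EXTEND of a negative low word yields (HOST_WIDE_INT) -1 and of
   a nonnegative low word yields 0, so a CONST_INT is widened into a
   (low, high) pair with

     lv = INTVAL (x), hv = HWI_SIGN_EXTEND (lv);

   which is how the constant-folding code below builds double-word values.  */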
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
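/* For example (an added sketch, not in the original file, assuming a 64-bit
   HOST_WIDE_INT): in SImode, neg_const_int applied to
   (const_int -2147483648) yields (const_int -2147483648) again, because
   gen_int_mode truncates the mathematically overflowing result 2147483648
   back into the mode.  */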
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
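/* A usage sketch (added for illustration, not in the original file):

     rtx sum = simplify_gen_binary (PLUS, SImode, const1_rtx, const1_rtx);

   folds to (const_int 2) via simplify_binary_operation, whereas operands
   that do not fold, e.g. two pseudo registers, come back as a freshly
   generated (plus:SI reg reg) from gen_rtx_fmt_ee.  */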
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
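/* Illustrative note (an added sketch, not part of the original sources,
   with a hypothetical pool label "*.LC0"): for a reference such as
   (mem/u:DF (symbol_ref/u ("*.LC0"))) whose pool entry holds 1.0, this
   returns the pooled (const_double:DF 1.0), which is what lets the
   simplifiers below fold through constant pool loads.  */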
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
        {
#ifdef FLOAT_STORE_FLAG_VALUE
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE val;
              if (tem == const0_rtx)
                return CONST0_RTX (mode);
              if (tem != const_true_rtx)
                abort ();
              val = FLOAT_STORE_FLAG_VALUE (mode);
              return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
            }
#endif
          return tem;
        }
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;

          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
261 /* Replace all occurrences of OLD in X with NEW and try to simplify the
262 resulting RTX. Return a new RTX which is as simplified as possible. */
265 simplify_replace_rtx (rtx x
, rtx old
, rtx
new)
267 enum rtx_code code
= GET_CODE (x
);
268 enum machine_mode mode
= GET_MODE (x
);
269 enum machine_mode op_mode
;
272 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
273 to build a new expression substituting recursively. If we can't do
274 anything, return our input. */
279 switch (GET_RTX_CLASS (code
))
283 op_mode
= GET_MODE (op0
);
284 op0
= simplify_replace_rtx (op0
, old
, new);
285 if (op0
== XEXP (x
, 0))
287 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
291 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
292 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
293 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
295 return simplify_gen_binary (code
, mode
, op0
, op1
);
300 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
301 op0
= simplify_replace_rtx (op0
, old
, new);
302 op1
= simplify_replace_rtx (op1
, old
, new);
303 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
305 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
310 op_mode
= GET_MODE (op0
);
311 op0
= simplify_replace_rtx (op0
, old
, new);
312 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
313 op2
= simplify_replace_rtx (XEXP (x
, 2), old
, new);
314 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
316 if (op_mode
== VOIDmode
)
317 op_mode
= GET_MODE (op0
);
318 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
321 /* The only case we try to handle is a SUBREG. */
324 op0
= simplify_replace_rtx (SUBREG_REG (x
), old
, new);
325 if (op0
== SUBREG_REG (x
))
327 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
328 GET_MODE (SUBREG_REG (x
)),
330 return op0
? op0
: x
;
337 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
338 if (op0
== XEXP (x
, 0))
340 return replace_equiv_address_nv (x
, op0
);
342 else if (code
== LO_SUM
)
344 op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
345 op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
347 /* (lo_sum (high x) x) -> x */
348 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
351 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
353 return gen_rtx_LO_SUM (mode
, op0
, op1
);
355 else if (code
== REG
)
357 if (REG_P (old
) && REGNO (x
) == REGNO (old
))
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
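/* For instance (an added sketch, not in the original comment): with a
   constant operand, simplify_unary_operation (NOT, SImode, const1_rtx,
   SImode) folds through the CONST_INT arithmetic below and yields
   (const_int -2).  */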
372 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
373 rtx op
, enum machine_mode op_mode
)
375 unsigned int width
= GET_MODE_BITSIZE (mode
);
376 rtx trueop
= avoid_constant_pool_reference (op
);
378 if (code
== VEC_DUPLICATE
)
380 if (!VECTOR_MODE_P (mode
))
382 if (GET_MODE (trueop
) != VOIDmode
383 && !VECTOR_MODE_P (GET_MODE (trueop
))
384 && GET_MODE_INNER (mode
) != GET_MODE (trueop
))
386 if (GET_MODE (trueop
) != VOIDmode
387 && VECTOR_MODE_P (GET_MODE (trueop
))
388 && GET_MODE_INNER (mode
) != GET_MODE_INNER (GET_MODE (trueop
)))
390 if (GET_CODE (trueop
) == CONST_INT
|| GET_CODE (trueop
) == CONST_DOUBLE
391 || GET_CODE (trueop
) == CONST_VECTOR
)
393 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
394 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
395 rtvec v
= rtvec_alloc (n_elts
);
398 if (GET_CODE (trueop
) != CONST_VECTOR
)
399 for (i
= 0; i
< n_elts
; i
++)
400 RTVEC_ELT (v
, i
) = trueop
;
403 enum machine_mode inmode
= GET_MODE (trueop
);
404 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
405 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
407 if (in_n_elts
>= n_elts
|| n_elts
% in_n_elts
)
409 for (i
= 0; i
< n_elts
; i
++)
410 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop
, i
% in_n_elts
);
412 return gen_rtx_CONST_VECTOR (mode
, v
);
415 else if (GET_CODE (op
) == CONST
)
416 return simplify_unary_operation (code
, mode
, XEXP (op
, 0), op_mode
);
418 if (VECTOR_MODE_P (mode
) && GET_CODE (trueop
) == CONST_VECTOR
)
420 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
421 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
422 enum machine_mode opmode
= GET_MODE (trueop
);
423 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
424 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
425 rtvec v
= rtvec_alloc (n_elts
);
428 if (op_n_elts
!= n_elts
)
431 for (i
= 0; i
< n_elts
; i
++)
433 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
434 CONST_VECTOR_ELT (trueop
, i
),
435 GET_MODE_INNER (opmode
));
438 RTVEC_ELT (v
, i
) = x
;
440 return gen_rtx_CONST_VECTOR (mode
, v
);
443 /* The order of these tests is critical so that, for example, we don't
444 check the wrong mode (input vs. output) for a conversion operation,
445 such as FIX. At some point, this should be simplified. */
447 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
448 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
450 HOST_WIDE_INT hv
, lv
;
453 if (GET_CODE (trueop
) == CONST_INT
)
454 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
456 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
458 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
459 d
= real_value_truncate (mode
, d
);
460 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
462 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
463 && (GET_CODE (trueop
) == CONST_DOUBLE
464 || GET_CODE (trueop
) == CONST_INT
))
466 HOST_WIDE_INT hv
, lv
;
469 if (GET_CODE (trueop
) == CONST_INT
)
470 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
472 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
474 if (op_mode
== VOIDmode
)
476 /* We don't know how to interpret negative-looking numbers in
477 this case, so don't try to fold those. */
481 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
484 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
486 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
487 d
= real_value_truncate (mode
, d
);
488 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
491 if (GET_CODE (trueop
) == CONST_INT
492 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
494 HOST_WIDE_INT arg0
= INTVAL (trueop
);
508 val
= (arg0
>= 0 ? arg0
: - arg0
);
512 /* Don't use ffs here. Instead, get low order bit and then its
513 number. If arg0 is zero, this will return 0, as desired. */
514 arg0
&= GET_MODE_MASK (mode
);
515 val
= exact_log2 (arg0
& (- arg0
)) + 1;
519 arg0
&= GET_MODE_MASK (mode
);
520 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
523 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
527 arg0
&= GET_MODE_MASK (mode
);
530 /* Even if the value at zero is undefined, we have to come
531 up with some replacement. Seems good enough. */
532 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
533 val
= GET_MODE_BITSIZE (mode
);
536 val
= exact_log2 (arg0
& -arg0
);
540 arg0
&= GET_MODE_MASK (mode
);
543 val
++, arg0
&= arg0
- 1;
547 arg0
&= GET_MODE_MASK (mode
);
550 val
++, arg0
&= arg0
- 1;
/* When zero-extending a CONST_INT, we need to know its
   original mode.  */
561 if (op_mode
== VOIDmode
)
563 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
565 /* If we were really extending the mode,
566 we would have to distinguish between zero-extension
567 and sign-extension. */
568 if (width
!= GET_MODE_BITSIZE (op_mode
))
572 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
573 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
579 if (op_mode
== VOIDmode
)
581 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
583 /* If we were really extending the mode,
584 we would have to distinguish between zero-extension
585 and sign-extension. */
586 if (width
!= GET_MODE_BITSIZE (op_mode
))
590 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
593 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
595 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
596 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
613 val
= trunc_int_for_mode (val
, mode
);
615 return GEN_INT (val
);
618 /* We can do some operations on integer CONST_DOUBLEs. Also allow
619 for a DImode operation on a CONST_INT. */
620 else if (GET_MODE (trueop
) == VOIDmode
621 && width
<= HOST_BITS_PER_WIDE_INT
* 2
622 && (GET_CODE (trueop
) == CONST_DOUBLE
623 || GET_CODE (trueop
) == CONST_INT
))
625 unsigned HOST_WIDE_INT l1
, lv
;
626 HOST_WIDE_INT h1
, hv
;
628 if (GET_CODE (trueop
) == CONST_DOUBLE
)
629 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
631 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
641 neg_double (l1
, h1
, &lv
, &hv
);
646 neg_double (l1
, h1
, &lv
, &hv
);
658 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
661 lv
= exact_log2 (l1
& -l1
) + 1;
667 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
668 - HOST_BITS_PER_WIDE_INT
;
670 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
671 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
672 lv
= GET_MODE_BITSIZE (mode
);
678 lv
= exact_log2 (l1
& -l1
);
680 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
681 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
682 lv
= GET_MODE_BITSIZE (mode
);
705 /* This is just a change-of-mode, so do nothing. */
710 if (op_mode
== VOIDmode
)
713 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
717 lv
= l1
& GET_MODE_MASK (op_mode
);
721 if (op_mode
== VOIDmode
722 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
726 lv
= l1
& GET_MODE_MASK (op_mode
);
727 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
728 && (lv
& ((HOST_WIDE_INT
) 1
729 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
730 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
732 hv
= HWI_SIGN_EXTEND (lv
);
743 return immed_double_const (lv
, hv
, mode
);
746 else if (GET_CODE (trueop
) == CONST_DOUBLE
747 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
749 REAL_VALUE_TYPE d
, t
;
750 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
755 if (HONOR_SNANS (mode
) && real_isnan (&d
))
757 real_sqrt (&t
, mode
, &d
);
761 d
= REAL_VALUE_ABS (d
);
764 d
= REAL_VALUE_NEGATE (d
);
767 d
= real_value_truncate (mode
, d
);
770 /* All this does is change the mode. */
773 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
779 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
782 else if (GET_CODE (trueop
) == CONST_DOUBLE
783 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
784 && GET_MODE_CLASS (mode
) == MODE_INT
785 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
/* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
   operators are intentionally left unspecified (to ease implementation
   by target backends), for consistency, this routine implements the
   same semantics for constant folding as used by the middle-end.  */
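/* Concretely (an added illustration, not part of the original comment):
   under these semantics a value above the representable range saturates to
   the bound computed below, e.g. (fix:SI (const_double:DF 1e10)) would fold
   to (const_int 2147483647), and a NaN operand folds to zero.  */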
792 HOST_WIDE_INT xh
, xl
, th
, tl
;
793 REAL_VALUE_TYPE x
, t
;
794 REAL_VALUE_FROM_CONST_DOUBLE (x
, trueop
);
798 if (REAL_VALUE_ISNAN (x
))
801 /* Test against the signed upper bound. */
802 if (width
> HOST_BITS_PER_WIDE_INT
)
804 th
= ((unsigned HOST_WIDE_INT
) 1
805 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
811 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
813 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
814 if (REAL_VALUES_LESS (t
, x
))
821 /* Test against the signed lower bound. */
822 if (width
> HOST_BITS_PER_WIDE_INT
)
824 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
830 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
832 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
833 if (REAL_VALUES_LESS (x
, t
))
839 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
843 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
846 /* Test against the unsigned upper bound. */
847 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
852 else if (width
>= HOST_BITS_PER_WIDE_INT
)
854 th
= ((unsigned HOST_WIDE_INT
) 1
855 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
861 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
863 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
864 if (REAL_VALUES_LESS (t
, x
))
871 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
877 return immed_double_const (xl
, xh
, mode
);
880 /* This was formerly used only for non-IEEE float.
881 eggert@twinsun.com says it is safe for IEEE also. */
884 enum rtx_code reversed
;
/* There are some simplifications we can do even if the operands
   aren't constant.  */
892 /* (not (not X)) == X. */
893 if (GET_CODE (op
) == NOT
)
896 /* (not (eq X Y)) == (ne X Y), etc. */
897 if (GET_RTX_CLASS (GET_CODE (op
)) == '<'
898 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
899 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
901 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
902 XEXP (op
, 0), XEXP (op
, 1));
904 /* (not (plus X -1)) can become (neg X). */
905 if (GET_CODE (op
) == PLUS
906 && XEXP (op
, 1) == constm1_rtx
)
907 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
909 /* Similarly, (not (neg X)) is (plus X -1). */
910 if (GET_CODE (op
) == NEG
)
911 return plus_constant (XEXP (op
, 0), -1);
913 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
914 if (GET_CODE (op
) == XOR
915 && GET_CODE (XEXP (op
, 1)) == CONST_INT
916 && (temp
= simplify_unary_operation (NOT
, mode
,
919 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
/* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
   operands other than 1, but that is not valid.  We could do a
   similar simplification for (not (lshiftrt C X)) where C is
   just the sign bit, but this doesn't seem common enough to
   bother with.  */
927 if (GET_CODE (op
) == ASHIFT
928 && XEXP (op
, 0) == const1_rtx
)
930 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
931 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
934 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
935 by reversing the comparison code if valid. */
936 if (STORE_FLAG_VALUE
== -1
937 && GET_RTX_CLASS (GET_CODE (op
)) == '<'
938 && (reversed
= reversed_comparison_code (op
, NULL_RTX
))
940 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
941 XEXP (op
, 0), XEXP (op
, 1));
943 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
944 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
945 so we can perform the above simplification. */
947 if (STORE_FLAG_VALUE
== -1
948 && GET_CODE (op
) == ASHIFTRT
949 && GET_CODE (XEXP (op
, 1)) == CONST_INT
950 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
951 return simplify_gen_relational (GE
, mode
, VOIDmode
,
952 XEXP (op
, 0), const0_rtx
);
957 /* (neg (neg X)) == X. */
958 if (GET_CODE (op
) == NEG
)
961 /* (neg (plus X 1)) can become (not X). */
962 if (GET_CODE (op
) == PLUS
963 && XEXP (op
, 1) == const1_rtx
)
964 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
966 /* Similarly, (neg (not X)) is (plus X 1). */
967 if (GET_CODE (op
) == NOT
)
968 return plus_constant (XEXP (op
, 0), 1);
970 /* (neg (minus X Y)) can become (minus Y X). This transformation
971 isn't safe for modes with signed zeros, since if X and Y are
972 both +0, (minus Y X) is the same as (minus X Y). If the
973 rounding mode is towards +infinity (or -infinity) then the two
974 expressions will be rounded differently. */
975 if (GET_CODE (op
) == MINUS
976 && !HONOR_SIGNED_ZEROS (mode
)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
978 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1),
981 if (GET_CODE (op
) == PLUS
982 && !HONOR_SIGNED_ZEROS (mode
)
983 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
985 /* (neg (plus A C)) is simplified to (minus -C A). */
986 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
987 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
989 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1),
992 return simplify_gen_binary (MINUS
, mode
, temp
,
996 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
997 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
998 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
1001 /* (neg (mult A B)) becomes (mult (neg A) B).
1002 This works even for floating-point values. */
1003 if (GET_CODE (op
) == MULT
1004 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1006 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
1007 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
/* NEG commutes with ASHIFT since it is multiplication.  Only do
   this if we can then eliminate the NEG (e.g., if the operand
   is a constant).  */
1013 if (GET_CODE (op
) == ASHIFT
)
1015 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0),
1018 return simplify_gen_binary (ASHIFT
, mode
, temp
,
/* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
   becomes just the MINUS if its mode is MODE.  This allows
   folding switch statements on machines using casesi (such as
   the VAX).  */
1029 if (GET_CODE (op
) == TRUNCATE
1030 && GET_MODE (XEXP (op
, 0)) == mode
1031 && GET_CODE (XEXP (op
, 0)) == MINUS
1032 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1033 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1034 return XEXP (op
, 0);
1036 /* Check for a sign extension of a subreg of a promoted
1037 variable, where the promotion is sign-extended, and the
1038 target mode is the same as the variable's promotion. */
1039 if (GET_CODE (op
) == SUBREG
1040 && SUBREG_PROMOTED_VAR_P (op
)
1041 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1042 && GET_MODE (XEXP (op
, 0)) == mode
)
1043 return XEXP (op
, 0);
1045 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1046 if (! POINTERS_EXTEND_UNSIGNED
1047 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1049 || (GET_CODE (op
) == SUBREG
1050 && GET_CODE (SUBREG_REG (op
)) == REG
1051 && REG_POINTER (SUBREG_REG (op
))
1052 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1053 return convert_memory_address (Pmode
, op
);
1058 /* Check for a zero extension of a subreg of a promoted
1059 variable, where the promotion is zero-extended, and the
1060 target mode is the same as the variable's promotion. */
1061 if (GET_CODE (op
) == SUBREG
1062 && SUBREG_PROMOTED_VAR_P (op
)
1063 && SUBREG_PROMOTED_UNSIGNED_P (op
)
1064 && GET_MODE (XEXP (op
, 0)) == mode
)
1065 return XEXP (op
, 0);
1067 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1068 if (POINTERS_EXTEND_UNSIGNED
> 0
1069 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1071 || (GET_CODE (op
) == SUBREG
1072 && GET_CODE (SUBREG_REG (op
)) == REG
1073 && REG_POINTER (SUBREG_REG (op
))
1074 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1075 return convert_memory_address (Pmode
, op
);
1087 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1088 is a suitable integer or floating point immediate constant. */
1090 associative_constant_p (rtx op
)
1092 if (GET_CODE (op
) == CONST_INT
1093 || GET_CODE (op
) == CONST_DOUBLE
)
1095 op
= avoid_constant_pool_reference (op
);
1096 return GET_CODE (op
) == CONST_INT
1097 || GET_CODE (op
) == CONST_DOUBLE
;
1100 /* Subroutine of simplify_binary_operation to simplify an associative
1101 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1102 Return 0 if no simplification is possible. */
1104 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1109 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1110 if (GET_CODE (op0
) == code
1111 && associative_constant_p (op1
)
1112 && associative_constant_p (XEXP (op0
, 1)))
1114 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1117 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1120 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1121 if (GET_CODE (op0
) == code
1122 && GET_CODE (op1
) == code
1123 && associative_constant_p (XEXP (op0
, 1))
1124 && associative_constant_p (XEXP (op1
, 1)))
1126 rtx c
= simplify_binary_operation (code
, mode
,
1127 XEXP (op0
, 1), XEXP (op1
, 1));
1130 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1131 return simplify_gen_binary (code
, mode
, tem
, c
);
1134 /* Canonicalize (x op c) op y as (x op y) op c. */
1135 if (GET_CODE (op0
) == code
1136 && associative_constant_p (XEXP (op0
, 1)))
1138 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1139 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1142 /* Canonicalize x op (y op c) as (x op y) op c. */
1143 if (GET_CODE (op1
) == code
1144 && associative_constant_p (XEXP (op1
, 1)))
1146 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1147 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1153 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1154 and OP1. Return 0 if no simplification is possible.
1156 Don't use this for relational operations such as EQ or LT.
1157 Use simplify_relational_operation instead. */
1159 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1162 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
1164 unsigned int width
= GET_MODE_BITSIZE (mode
);
1166 rtx trueop0
= avoid_constant_pool_reference (op0
);
1167 rtx trueop1
= avoid_constant_pool_reference (op1
);
1169 /* Relational operations don't work here. We must know the mode
1170 of the operands in order to do the comparison correctly.
1171 Assuming a full word can give incorrect results.
1172 Consider comparing 128 with -128 in QImode. */
1174 if (GET_RTX_CLASS (code
) == '<')
1177 /* Make sure the constant is second. */
1178 if (GET_RTX_CLASS (code
) == 'c'
1179 && swap_commutative_operands_p (trueop0
, trueop1
))
1181 tem
= op0
, op0
= op1
, op1
= tem
;
1182 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
1185 if (VECTOR_MODE_P (mode
)
1186 && GET_CODE (trueop0
) == CONST_VECTOR
1187 && GET_CODE (trueop1
) == CONST_VECTOR
)
1189 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1190 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1191 enum machine_mode op0mode
= GET_MODE (trueop0
);
1192 int op0_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op0mode
));
1193 unsigned op0_n_elts
= (GET_MODE_SIZE (op0mode
) / op0_elt_size
);
1194 enum machine_mode op1mode
= GET_MODE (trueop1
);
1195 int op1_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op1mode
));
1196 unsigned op1_n_elts
= (GET_MODE_SIZE (op1mode
) / op1_elt_size
);
1197 rtvec v
= rtvec_alloc (n_elts
);
1200 if (op0_n_elts
!= n_elts
|| op1_n_elts
!= n_elts
)
1203 for (i
= 0; i
< n_elts
; i
++)
1205 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
1206 CONST_VECTOR_ELT (trueop0
, i
),
1207 CONST_VECTOR_ELT (trueop1
, i
));
1210 RTVEC_ELT (v
, i
) = x
;
1213 return gen_rtx_CONST_VECTOR (mode
, v
);
1216 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
1217 && GET_CODE (trueop0
) == CONST_DOUBLE
1218 && GET_CODE (trueop1
) == CONST_DOUBLE
1219 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
1221 REAL_VALUE_TYPE f0
, f1
, value
;
1223 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
1224 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
1225 f0
= real_value_truncate (mode
, f0
);
1226 f1
= real_value_truncate (mode
, f1
);
1228 if (HONOR_SNANS (mode
)
1229 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
1233 && REAL_VALUES_EQUAL (f1
, dconst0
)
1234 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
1237 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
1239 value
= real_value_truncate (mode
, value
);
1240 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
1243 /* We can fold some multi-word operations. */
1244 if (GET_MODE_CLASS (mode
) == MODE_INT
1245 && width
== HOST_BITS_PER_WIDE_INT
* 2
1246 && (GET_CODE (trueop0
) == CONST_DOUBLE
1247 || GET_CODE (trueop0
) == CONST_INT
)
1248 && (GET_CODE (trueop1
) == CONST_DOUBLE
1249 || GET_CODE (trueop1
) == CONST_INT
))
1251 unsigned HOST_WIDE_INT l1
, l2
, lv
;
1252 HOST_WIDE_INT h1
, h2
, hv
;
1254 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
1255 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
1257 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
1259 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
1260 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
1262 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
1267 /* A - B == A + (-B). */
1268 neg_double (l2
, h2
, &lv
, &hv
);
1271 /* Fall through.... */
1274 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1278 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
1281 case DIV
: case MOD
: case UDIV
: case UMOD
:
1282 /* We'd need to include tree.h to do this and it doesn't seem worth
1287 lv
= l1
& l2
, hv
= h1
& h2
;
1291 lv
= l1
| l2
, hv
= h1
| h2
;
1295 lv
= l1
^ l2
, hv
= h1
^ h2
;
1301 && ((unsigned HOST_WIDE_INT
) l1
1302 < (unsigned HOST_WIDE_INT
) l2
)))
1311 && ((unsigned HOST_WIDE_INT
) l1
1312 > (unsigned HOST_WIDE_INT
) l2
)))
1319 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
1321 && ((unsigned HOST_WIDE_INT
) l1
1322 < (unsigned HOST_WIDE_INT
) l2
)))
1329 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
1331 && ((unsigned HOST_WIDE_INT
) l1
1332 > (unsigned HOST_WIDE_INT
) l2
)))
1338 case LSHIFTRT
: case ASHIFTRT
:
1340 case ROTATE
: case ROTATERT
:
1341 #ifdef SHIFT_COUNT_TRUNCATED
1342 if (SHIFT_COUNT_TRUNCATED
)
1343 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
1346 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
1349 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
1350 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
1352 else if (code
== ASHIFT
)
1353 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
1354 else if (code
== ROTATE
)
1355 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1356 else /* code == ROTATERT */
1357 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1364 return immed_double_const (lv
, hv
, mode
);
1367 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
1368 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
1370 /* Even if we can't compute a constant result,
1371 there are some cases worth simplifying. */
1376 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1377 when x is NaN, infinite, or finite and nonzero. They aren't
1378 when x is -0 and the rounding mode is not towards -infinity,
1379 since (-0) + 0 is then 0. */
1380 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1383 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1384 transformations are safe even for IEEE. */
1385 if (GET_CODE (op0
) == NEG
)
1386 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1387 else if (GET_CODE (op1
) == NEG
)
1388 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1390 /* (~a) + 1 -> -a */
1391 if (INTEGRAL_MODE_P (mode
)
1392 && GET_CODE (op0
) == NOT
1393 && trueop1
== const1_rtx
)
1394 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1396 /* Handle both-operands-constant cases. We can only add
1397 CONST_INTs to constants since the sum of relocatable symbols
1398 can't be handled by most assemblers. Don't add CONST_INT
1399 to CONST_INT since overflow won't be computed properly if wider
1400 than HOST_BITS_PER_WIDE_INT. */
1402 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1403 && GET_CODE (op1
) == CONST_INT
)
1404 return plus_constant (op0
, INTVAL (op1
));
1405 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1406 && GET_CODE (op0
) == CONST_INT
)
1407 return plus_constant (op1
, INTVAL (op0
));
1409 /* See if this is something like X * C - X or vice versa or
1410 if the multiplication is written as a shift. If so, we can
1411 distribute and make a new multiply, shift, or maybe just
1412 have X (if C is 2 in the example above). But don't make
1413 real multiply if we didn't have one before. */
1415 if (! FLOAT_MODE_P (mode
))
1417 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1418 rtx lhs
= op0
, rhs
= op1
;
1421 if (GET_CODE (lhs
) == NEG
)
1422 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1423 else if (GET_CODE (lhs
) == MULT
1424 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1426 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1429 else if (GET_CODE (lhs
) == ASHIFT
1430 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1431 && INTVAL (XEXP (lhs
, 1)) >= 0
1432 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1434 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1435 lhs
= XEXP (lhs
, 0);
1438 if (GET_CODE (rhs
) == NEG
)
1439 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1440 else if (GET_CODE (rhs
) == MULT
1441 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1443 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1446 else if (GET_CODE (rhs
) == ASHIFT
1447 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1448 && INTVAL (XEXP (rhs
, 1)) >= 0
1449 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1451 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1452 rhs
= XEXP (rhs
, 0);
1455 if (rtx_equal_p (lhs
, rhs
))
1457 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1458 GEN_INT (coeff0
+ coeff1
));
1459 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1463 /* If one of the operands is a PLUS or a MINUS, see if we can
1464 simplify this by the associative law.
1465 Don't use the associative law for floating point.
1466 The inaccuracy makes it nonassociative,
1467 and subtle programs can break if operations are associated. */
1469 if (INTEGRAL_MODE_P (mode
)
1470 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1471 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1472 || (GET_CODE (op0
) == CONST
1473 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1474 || (GET_CODE (op1
) == CONST
1475 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1476 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1479 /* Reassociate floating point addition only when the user
1480 specifies unsafe math optimizations. */
1481 if (FLOAT_MODE_P (mode
)
1482 && flag_unsafe_math_optimizations
)
1484 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1492 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1493 using cc0, in which case we want to leave it as a COMPARE
1494 so we can distinguish it from a register-register-copy.
1496 In IEEE floating point, x-0 is not the same as x. */
1498 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1499 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1500 && trueop1
== CONST0_RTX (mode
))
1504 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1505 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1506 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1507 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1509 rtx xop00
= XEXP (op0
, 0);
1510 rtx xop10
= XEXP (op1
, 0);
1513 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1515 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1516 && GET_MODE (xop00
) == GET_MODE (xop10
)
1517 && REGNO (xop00
) == REGNO (xop10
)
1518 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1519 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1526 /* We can't assume x-x is 0 even with non-IEEE floating point,
1527 but since it is zero except in very strange circumstances, we
1528 will treat it as zero with -funsafe-math-optimizations. */
1529 if (rtx_equal_p (trueop0
, trueop1
)
1530 && ! side_effects_p (op0
)
1531 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1532 return CONST0_RTX (mode
);
1534 /* Change subtraction from zero into negation. (0 - x) is the
1535 same as -x when x is NaN, infinite, or finite and nonzero.
1536 But if the mode has signed zeros, and does not round towards
1537 -infinity, then 0 - 0 is 0, not -0. */
1538 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1539 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1541 /* (-1 - a) is ~a. */
1542 if (trueop0
== constm1_rtx
)
1543 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
/* Subtracting 0 has no effect unless the mode has signed zeros
   and supports rounding towards -infinity.  In such a case,
   0 - 0 is -0.  */
1548 if (!(HONOR_SIGNED_ZEROS (mode
)
1549 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1550 && trueop1
== CONST0_RTX (mode
))
1553 /* See if this is something like X * C - X or vice versa or
1554 if the multiplication is written as a shift. If so, we can
1555 distribute and make a new multiply, shift, or maybe just
1556 have X (if C is 2 in the example above). But don't make
1557 real multiply if we didn't have one before. */
1559 if (! FLOAT_MODE_P (mode
))
1561 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1562 rtx lhs
= op0
, rhs
= op1
;
1565 if (GET_CODE (lhs
) == NEG
)
1566 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1567 else if (GET_CODE (lhs
) == MULT
1568 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1570 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1573 else if (GET_CODE (lhs
) == ASHIFT
1574 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1575 && INTVAL (XEXP (lhs
, 1)) >= 0
1576 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1578 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1579 lhs
= XEXP (lhs
, 0);
1582 if (GET_CODE (rhs
) == NEG
)
1583 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1584 else if (GET_CODE (rhs
) == MULT
1585 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1587 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1590 else if (GET_CODE (rhs
) == ASHIFT
1591 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1592 && INTVAL (XEXP (rhs
, 1)) >= 0
1593 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1595 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1596 rhs
= XEXP (rhs
, 0);
1599 if (rtx_equal_p (lhs
, rhs
))
1601 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1602 GEN_INT (coeff0
- coeff1
));
1603 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1607 /* (a - (-b)) -> (a + b). True even for IEEE. */
1608 if (GET_CODE (op1
) == NEG
)
1609 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1611 /* (-x - c) may be simplified as (-c - x). */
1612 if (GET_CODE (op0
) == NEG
1613 && (GET_CODE (op1
) == CONST_INT
1614 || GET_CODE (op1
) == CONST_DOUBLE
))
1616 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1618 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1621 /* If one of the operands is a PLUS or a MINUS, see if we can
1622 simplify this by the associative law.
1623 Don't use the associative law for floating point.
1624 The inaccuracy makes it nonassociative,
1625 and subtle programs can break if operations are associated. */
1627 if (INTEGRAL_MODE_P (mode
)
1628 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1629 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1630 || (GET_CODE (op0
) == CONST
1631 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1632 || (GET_CODE (op1
) == CONST
1633 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1634 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1637 /* Don't let a relocatable value get a negative coeff. */
1638 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1639 return simplify_gen_binary (PLUS
, mode
,
1641 neg_const_int (mode
, op1
));
1643 /* (x - (x & y)) -> (x & ~y) */
1644 if (GET_CODE (op1
) == AND
)
1646 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1648 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1649 GET_MODE (XEXP (op1
, 1)));
1650 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1652 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1654 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1655 GET_MODE (XEXP (op1
, 0)));
1656 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1662 if (trueop1
== constm1_rtx
)
1663 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1665 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1666 x is NaN, since x * 0 is then also NaN. Nor is it valid
1667 when the mode has signed zeros, since multiplying a negative
1668 number by 0 will give -0, not 0. */
1669 if (!HONOR_NANS (mode
)
1670 && !HONOR_SIGNED_ZEROS (mode
)
1671 && trueop1
== CONST0_RTX (mode
)
1672 && ! side_effects_p (op0
))
/* In IEEE floating point, x*1 is not equivalent to x for
   signaling NaNs.  */
1677 if (!HONOR_SNANS (mode
)
1678 && trueop1
== CONST1_RTX (mode
))
1681 /* Convert multiply by constant power of two into shift unless
1682 we are still generating RTL. This test is a kludge. */
1683 if (GET_CODE (trueop1
) == CONST_INT
1684 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1685 /* If the mode is larger than the host word size, and the
1686 uppermost bit is set, then this isn't a power of two due
1687 to implicit sign extension. */
1688 && (width
<= HOST_BITS_PER_WIDE_INT
1689 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1690 && ! rtx_equal_function_value_matters
)
1691 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1693 /* x*2 is x+x and x*(-1) is -x */
1694 if (GET_CODE (trueop1
) == CONST_DOUBLE
1695 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1696 && GET_MODE (op0
) == mode
)
1699 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1701 if (REAL_VALUES_EQUAL (d
, dconst2
))
1702 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1704 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1705 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1708 /* Reassociate multiplication, but for floating point MULTs
1709 only when the user specifies unsafe math optimizations. */
1710 if (! FLOAT_MODE_P (mode
)
1711 || flag_unsafe_math_optimizations
)
1713 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1720 if (trueop1
== const0_rtx
)
1722 if (GET_CODE (trueop1
) == CONST_INT
1723 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1724 == GET_MODE_MASK (mode
)))
1726 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1728 /* A | (~A) -> -1 */
1729 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1730 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1731 && ! side_effects_p (op0
)
1732 && GET_MODE_CLASS (mode
) != MODE_CC
)
1734 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1740 if (trueop1
== const0_rtx
)
1742 if (GET_CODE (trueop1
) == CONST_INT
1743 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1744 == GET_MODE_MASK (mode
)))
1745 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1746 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1747 && GET_MODE_CLASS (mode
) != MODE_CC
)
1749 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1755 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1757 if (GET_CODE (trueop1
) == CONST_INT
1758 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1759 == GET_MODE_MASK (mode
)))
1761 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1762 && GET_MODE_CLASS (mode
) != MODE_CC
)
1765 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1766 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1767 && ! side_effects_p (op0
)
1768 && GET_MODE_CLASS (mode
) != MODE_CC
)
1770 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
/* Convert divide by power of two into shift (divide by 1 handled
   below).  */
1778 if (GET_CODE (trueop1
) == CONST_INT
1779 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1780 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (arg1
));
1782 /* Fall through.... */
1785 if (trueop1
== CONST1_RTX (mode
))
/* On some platforms DIV uses narrower mode than its
   operands.  */
1789 rtx x
= gen_lowpart_common (mode
, op0
);
1792 else if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1793 return gen_lowpart_SUBREG (mode
, op0
);
1798 /* Maybe change 0 / x to 0. This transformation isn't safe for
1799 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1800 Nor is it safe for modes with signed zeros, since dividing
1801 0 by a negative number gives -0, not 0. */
1802 if (!HONOR_NANS (mode
)
1803 && !HONOR_SIGNED_ZEROS (mode
)
1804 && trueop0
== CONST0_RTX (mode
)
1805 && ! side_effects_p (op1
))
1808 /* Change division by a constant into multiplication. Only do
1809 this with -funsafe-math-optimizations. */
1810 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1811 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1812 && trueop1
!= CONST0_RTX (mode
)
1813 && flag_unsafe_math_optimizations
)
1816 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1818 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1820 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1821 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1822 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
1828 /* Handle modulus by power of two (mod with 1 handled below). */
1829 if (GET_CODE (trueop1
) == CONST_INT
1830 && exact_log2 (INTVAL (trueop1
)) > 0)
1831 return simplify_gen_binary (AND
, mode
, op0
,
1832 GEN_INT (INTVAL (op1
) - 1));
1834 /* Fall through.... */
1837 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1838 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1845 /* Rotating ~0 always results in ~0. */
1846 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1847 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1848 && ! side_effects_p (op1
))
1851 /* Fall through.... */
1855 if (trueop1
== const0_rtx
)
1857 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1862 if (width
<= HOST_BITS_PER_WIDE_INT
1863 && GET_CODE (trueop1
) == CONST_INT
1864 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1865 && ! side_effects_p (op0
))
1867 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1869 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1875 if (width
<= HOST_BITS_PER_WIDE_INT
1876 && GET_CODE (trueop1
) == CONST_INT
1877 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1878 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1879 && ! side_effects_p (op0
))
1881 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1883 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1889 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1891 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1893 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1899 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1901 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1903 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1912 /* ??? There are simplifications that can be done. */
1916 if (!VECTOR_MODE_P (mode
))
1918 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1920 != GET_MODE_INNER (GET_MODE (trueop0
)))
1921 || GET_CODE (trueop1
) != PARALLEL
1922 || XVECLEN (trueop1
, 0) != 1
1923 || GET_CODE (XVECEXP (trueop1
, 0, 0)) != CONST_INT
)
1926 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1927 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP (trueop1
, 0, 0)));
1931 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1932 || (GET_MODE_INNER (mode
)
1933 != GET_MODE_INNER (GET_MODE (trueop0
)))
1934 || GET_CODE (trueop1
) != PARALLEL
)
1937 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1939 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1940 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1941 rtvec v
= rtvec_alloc (n_elts
);
1944 if (XVECLEN (trueop1
, 0) != (int) n_elts
)
1946 for (i
= 0; i
< n_elts
; i
++)
1948 rtx x
= XVECEXP (trueop1
, 0, i
);
1950 if (GET_CODE (x
) != CONST_INT
)
1952 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, INTVAL (x
));
1955 return gen_rtx_CONST_VECTOR (mode
, v
);
1961 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
1962 ? GET_MODE (trueop0
)
1963 : GET_MODE_INNER (mode
));
1964 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
1965 ? GET_MODE (trueop1
)
1966 : GET_MODE_INNER (mode
));
1968 if (!VECTOR_MODE_P (mode
)
1969 || (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
1970 != GET_MODE_SIZE (mode
)))
1973 if ((VECTOR_MODE_P (op0_mode
)
1974 && (GET_MODE_INNER (mode
)
1975 != GET_MODE_INNER (op0_mode
)))
1976 || (!VECTOR_MODE_P (op0_mode
)
1977 && GET_MODE_INNER (mode
) != op0_mode
))
1980 if ((VECTOR_MODE_P (op1_mode
)
1981 && (GET_MODE_INNER (mode
)
1982 != GET_MODE_INNER (op1_mode
)))
1983 || (!VECTOR_MODE_P (op1_mode
)
1984 && GET_MODE_INNER (mode
) != op1_mode
))
1987 if ((GET_CODE (trueop0
) == CONST_VECTOR
1988 || GET_CODE (trueop0
) == CONST_INT
1989 || GET_CODE (trueop0
) == CONST_DOUBLE
)
1990 && (GET_CODE (trueop1
) == CONST_VECTOR
1991 || GET_CODE (trueop1
) == CONST_INT
1992 || GET_CODE (trueop1
) == CONST_DOUBLE
))
1994 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1995 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1996 rtvec v
= rtvec_alloc (n_elts
);
1998 unsigned in_n_elts
= 1;
2000 if (VECTOR_MODE_P (op0_mode
))
2001 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2002 for (i
= 0; i
< n_elts
; i
++)
2006 if (!VECTOR_MODE_P (op0_mode
))
2007 RTVEC_ELT (v
, i
) = trueop0
;
2009 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2013 if (!VECTOR_MODE_P (op1_mode
))
2014 RTVEC_ELT (v
, i
) = trueop1
;
2016 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2021 return gen_rtx_CONST_VECTOR (mode
, v
);
2033 /* Get the integer argument values in two forms:
2034 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2036 arg0
= INTVAL (trueop0
);
2037 arg1
= INTVAL (trueop1
);
2039 if (width
< HOST_BITS_PER_WIDE_INT
)
2041 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2042 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2045 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2046 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2049 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2050 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2058 /* Compute the value of the arithmetic. */
2063 val
= arg0s
+ arg1s
;
2067 val
= arg0s
- arg1s
;
2071 val
= arg0s
* arg1s
;
2076 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2079 val
= arg0s
/ arg1s
;
2084 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2087 val
= arg0s
% arg1s
;
2092 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2095 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
2100 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2103 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
2119 /* If shift count is undefined, don't fold it; let the machine do
2120 what it wants. But truncate it if the machine will do that. */
2124 #ifdef SHIFT_COUNT_TRUNCATED
2125 if (SHIFT_COUNT_TRUNCATED
)
2129 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
2136 #ifdef SHIFT_COUNT_TRUNCATED
2137 if (SHIFT_COUNT_TRUNCATED
)
2141 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
2148 #ifdef SHIFT_COUNT_TRUNCATED
2149 if (SHIFT_COUNT_TRUNCATED
)
2153 val
= arg0s
>> arg1
;
/* Bootstrap compiler may not have sign extended the right shift.
   Manually extend the sign to ensure bootstrap cc matches gcc.  */
2157 if (arg0s
< 0 && arg1
> 0)
2158 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
2167 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
2168 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
2176 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
2177 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
2181 /* Do nothing here. */
2185 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
2189 val
= ((unsigned HOST_WIDE_INT
) arg0
2190 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2194 val
= arg0s
> arg1s
? arg0s
: arg1s
;
2198 val
= ((unsigned HOST_WIDE_INT
) arg0
2199 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2206 /* ??? There are simplifications that can be done. */
2213 val
= trunc_int_for_mode (val
, mode
);
2215 return GEN_INT (val
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
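/* Worked example (added as a sketch, not in the original comment):
   simplifying (plus (plus x (const_int 3)) (minus y (const_int 3))) first
   flattens the nested operations into the operand list {x, +3, y, -3},
   then cancels the constants against each other in the combination loop,
   and finally rebuilds the result as (plus x y).  */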
2230 struct simplify_plus_minus_op_data
2237 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
2239 const struct simplify_plus_minus_op_data
*d1
= p1
;
2240 const struct simplify_plus_minus_op_data
*d2
= p2
;
2242 return (commutative_operand_precedence (d2
->op
)
2243 - commutative_operand_precedence (d1
->op
));
2247 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
2250 struct simplify_plus_minus_op_data ops
[8];
2252 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
;
2253 int first
, negate
, changed
;
2256 memset (ops
, 0, sizeof ops
);
2258 /* Set up the two operands and then expand them until nothing has been
2259 changed. If we run out of room in our array, give up; this should
2260 almost never happen. */
2265 ops
[1].neg
= (code
== MINUS
);
2271 for (i
= 0; i
< n_ops
; i
++)
2273 rtx this_op
= ops
[i
].op
;
2274 int this_neg
= ops
[i
].neg
;
2275 enum rtx_code this_code
= GET_CODE (this_op
);
2284 ops
[n_ops
].op
= XEXP (this_op
, 1);
2285 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
2288 ops
[i
].op
= XEXP (this_op
, 0);
2294 ops
[i
].op
= XEXP (this_op
, 0);
2295 ops
[i
].neg
= ! this_neg
;
2301 && GET_CODE (XEXP (this_op
, 0)) == PLUS
2302 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
2303 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
2305 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
2306 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
2307 ops
[n_ops
].neg
= this_neg
;
2315 /* ~a -> (-a - 1) */
2318 ops
[n_ops
].op
= constm1_rtx
;
2319 ops
[n_ops
++].neg
= this_neg
;
2320 ops
[i
].op
= XEXP (this_op
, 0);
2321 ops
[i
].neg
= !this_neg
;
2329 ops
[i
].op
= neg_const_int (mode
, this_op
);
2342 /* If we only have two operands, we can't do anything. */
2343 if (n_ops
<= 2 && !force
)
2346 /* Count the number of CONSTs we didn't split above. */
2347 for (i
= 0; i
< n_ops
; i
++)
2348 if (GET_CODE (ops
[i
].op
) == CONST
)
2351 /* Now simplify each pair of operands until nothing changes. The first
2352 time through just simplify constants against each other. */
  do
    {
      changed = 0;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;
  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
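/* Worked example (illustrative only):
   simplify_relational_operation (LT, SImode, GEN_INT (-1), const0_rtx)
   folds the signed comparison -1 < 0 to const_true_rtx, while LTU on the
   same operands folds to const0_rtx, since -1 is the largest unsigned
   value.  */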
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */
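  /* Illustrative note: with 8-bit operands, 0 LTU 200 is true, but the
     subtraction 0 - 200 wraps to 56, and a signed test of 56 against zero
     would say "not less".  That is why the transformation below is
     restricted to signed comparison codes.  */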
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
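  /* Illustrative note: for (ne (symbol_ref "foo") (const_int 0)) with a
     non-weak symbol, nonzero_address_p knows the address cannot be zero,
     so the NE test below folds to const_true_rtx and the corresponding EQ
     test folds to const0_rtx.  */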
  else
    {
      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
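/* Worked example (illustrative only):
   simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode, const1_rtx,
   op1, op2) sees a constant nonzero condition and returns OP1 unchanged;
   a const0_rtx condition would select OP2 instead.  */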
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
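    /* Worked example (illustrative only): with BITS_BIG_ENDIAN clear,
       extracting a 4-bit field at bit position 4 from (const_int 0xab)
       shifts right by 4 (giving 0xa) and masks to 4 bits, so ZERO_EXTRACT
       yields (const_int 10) while SIGN_EXTRACT, seeing the field's sign
       bit set, extends it to (const_int -6).  */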
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const_true_rtx)
            return op1;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;

    case VEC_MERGE:
      if (GET_MODE (op0) != mode
          || GET_MODE (op1) != mode
          || !VECTOR_MODE_P (mode))
        abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
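/* Worked example (illustrative only): on a little-endian target,
   simplify_immed_subreg (QImode, GEN_INT (0x1234), SImode, 0) unpacks the
   constant into a byte array beginning { 0x34, 0x12, 0x00, 0x00 }, selects
   the byte at offset 0 and repacks it, giving (const_int 0x34); BYTE == 1
   would instead give (const_int 0x12).  */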
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
                abort ();

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++ = (CONST_DOUBLE_HIGH (el)
                           >> (i - HOST_BITS_PER_WIDE_INT));
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < max_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              if (bitsize > elem_bitsize)
                abort ();
              if (bitsize % value_bit != 0)
                abort ();

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            abort ();
          break;

        default:
          abort ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
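  /* Illustrative note: on a fully big-endian target, taking the QImode low
     part of an SImode value means the caller passes BYTE == 3; the code
     below converts that to 4 - 1 - 3 == 0, so the little-endian byte array
     built above is indexed from its least-significant byte.  */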
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  if (elem_bitsize % value_bit != 0)
    abort ();
  if (elem_bitsize + value_start * value_bit > max_bitsize)
    abort ();

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                         + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else
              elems[elem] = immed_double_const (lo, hi, outer_submode);
          }
          break;

        case MODE_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        default:
          abort ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
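/* Worked example (illustrative only):
   simplify_subreg (SImode, GEN_INT (5), DImode, 0) hands the constant to
   simplify_immed_subreg; on a little-endian target it returns
   (const_int 5), the low word, while BYTE == 4 would return (const_int 0),
   the high word.  */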
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
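  /* Illustrative note: on a 32-bit target where hard registers 0 and 1
     hold the two halves of a DImode value, (subreg:SI (reg:DI 0) 4) can be
     rewritten as a direct reference to the second of those registers,
     provided that register is valid in SImode and is not the stack, frame,
     or argument pointer.  */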
  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that can not
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */
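/* Illustrative usage (hypothetical caller, not from this file): a caller
   might write

     part = simplify_gen_subreg (SImode, op, DImode, 0);

   which returns a folded rtx when simplify_subreg succeeds (for example a
   CONST_INT when OP is constant) and otherwise falls back to a plain
   (subreg:SI op 0).  */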
rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx new;
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           unnecessary.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      temp = simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          if (temp == const0_rtx)
            temp = CONST0_RTX (mode);
          else
            temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
                                                 mode);
        }
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
        {
          if (CONSTANT_P (XEXP (x, 0)))
            return const1_rtx;
        }
      break;

    case 'o':
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x