/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
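/* A small worked illustration of the macro above (an added sketch,
   compiled out): the (low, high) pair representing -5 is
   (low = (unsigned HOST_WIDE_INT) -5, high = -1), and HWI_SIGN_EXTEND
   recovers the high word from the low one.  */
#if 0
static void
hwi_sign_extend_example (void)
{
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);	/* yields -1 */
  (void) high;
}
#endif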
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
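/* Illustration (an added sketch, compiled out): negating the most
   negative value of a narrow mode overflows its range, so the result is
   truncated back into the mode; in QImode, (neg (const_int -128)) is
   again (const_int -128).  */
#if 0
static rtx
neg_const_int_example (void)
{
  return neg_const_int (QImode, GEN_INT (-128));
}
#endif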
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
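/* A minimal usage sketch of the entry point above (added, compiled out;
   register number 1 is an arbitrary placeholder): adding zero folds away
   entirely, so the register itself is returned.  */
#if 0
static rtx
simplify_gen_binary_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 1);
  return simplify_gen_binary (PLUS, SImode, reg, const0_rtx);
}
#endif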
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
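/* For example, a load such as (mem:SF (symbol_ref C)) whose address is a
   constant-pool entry holding 1.0 comes back from the function above as
   the CONST_DOUBLE for 1.0, so later folders can treat it as a constant
   rather than a memory reference.  */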
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
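/* Usage sketch (added, compiled out): a constant operand folds at once,
   e.g. (neg:SI (const_int 4)) becomes (const_int -4); anything else is
   wrapped in the requested unary rtx unchanged.  */
#if 0
static rtx
simplify_gen_unary_example (void)
{
  return simplify_gen_unary (NEG, SImode, GEN_INT (4), SImode);
}
#endif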
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
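/* For example, (if_then_else (const_int 1) A B) folds to A through
   simplify_ternary_operation; when nothing folds, the three operands are
   simply rebuilt into the requested ternary rtx.  */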
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE val;
	      if (tem == const0_rtx)
		return CONST0_RTX (mode);
	      if (tem != const_true_rtx)
		abort ();
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	    }
#endif
	  return tem;
	}
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return op0;
	  return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
	  if (new != UNKNOWN)
	    return simplify_gen_relational (new, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
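/* Usage sketch (added, compiled out): the COMPARE-unwrapping case above
   turns (eq (compare X Y) 0) into (eq X Y), so a caller can hand in the
   raw COMPARE.  Register number 1 is a placeholder.  */
#if 0
static rtx
simplify_gen_relational_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 1);
  rtx cmp = gen_rtx_COMPARE (CCmode, reg, const0_rtx);
  return simplify_gen_relational (EQ, SImode, VOIDmode, cmp, const0_rtx);
}
#endif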
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case 'o':
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}
      break;

    default:
      break;
    }
  return x;
}
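/* Usage sketch (added, compiled out): substituting (const_int 7) for
   (reg 1) inside (plus (reg 1) (const_int 1)) returns (const_int 8),
   because each rebuilt subexpression is re-simplified on the way out.  */
#if 0
static rtx
simplify_replace_rtx_example (void)
{
  rtx reg = gen_rtx_REG (SImode, 1);
  rtx x = gen_rtx_PLUS (SImode, reg, const1_rtx);
  return simplify_replace_rtx (x, reg, GEN_INT (7));
}
#endif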
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  abort ();
	}
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);

	  break;
	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  break;
	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
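/* For instance, with op0 = (plus (reg) (const_int 1)) and
   op1 = (const_int 2), the "(a op b) op c" -> "a op (b op c)" attempt
   above folds the two constants, so the function returns
   (plus (reg) (const_int 3)).  */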
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

	  /* Fall through....  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));

	  /* Fall through....  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;

	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    if (!VECTOR_MODE_P (mode)
		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    != GET_MODE_SIZE (mode)))
	      abort ();

	    if ((VECTOR_MODE_P (op0_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op0_mode)))
		|| (!VECTOR_MODE_P (op0_mode)
		    && GET_MODE_INNER (mode) != op0_mode))
	      abort ();

	    if ((VECTOR_MODE_P (op1_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op1_mode)))
		|| (!VECTOR_MODE_P (op1_mode)
		    && GET_MODE_INNER (mode) != op1_mode))
	      abort ();

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	  }
	  return 0;

	default:
	  abort ();
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
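/* Usage sketch (added, compiled out): constant operands run through the
   switch above, e.g. multiplying (const_int 6) by (const_int 7) in SImode
   yields (const_int 42), already truncated to the mode.  */
#if 0
static rtx
simplify_binary_operation_example (void)
{
  return simplify_binary_operation (MULT, SImode, GEN_INT (6), GEN_INT (7));
}
#endif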
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
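/* The comparison above sorts by decreasing commutative_operand_precedence,
   so registers and other complex operands move to the front and constants
   sink to the end of the array; the CONST-creation code after the qsort
   below relies on any CONST_INT being last.  */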
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */
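  /* For instance (a sketch): if the remaining operands are
     {+(symbol_ref "s"), +(const_int 4)}, the code below combines them
     with plus_constant into (const (plus (symbol_ref "s") (const_int 4))).  */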
  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
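/* A minimal example of the constant case handled below: comparing
   (const_int 3) with (const_int 2) under GT yields const_true_rtx,
   while GTU, LTU etc. apply the unsigned interpretation of the same
   bits.  */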
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem, trueop0, trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */
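  /* A sketch of the transformation: for (lt (plus (reg X) (const_int 4))
     (reg X)), the subtraction folds to (const_int 4) and the recursive
     call compares that against zero, giving const0_rtx.  */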
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
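  /* The comparisons above are lexicographic on the (high, low) pairs:
     the high words decide unless they are equal, in which case the low
     words (always treated as unsigned) break the tie.  E.g. with 32-bit
     words, the signed pair (-1, 0xffffffff) compares less than (0, 0)
     even though its low word is larger.  */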
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
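/* Two small examples of folds performed here: (if_then_else (const_int 1)
   A B) becomes A, and a ZERO_EXTRACT of a CONST_INT is evaluated at
   compile time (see the worked example after the extraction case
   below).  */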
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
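      /* A worked example of the extraction above (little-endian bit
	 numbering, a sketch): for (zero_extract (const_int 0xab)
	 (const_int 4) (const_int 4)), VAL starts as 0xab, the shift by
	 OP2 leaves 0xa, and masking to OP1 = 4 bits gives
	 (const_int 10).  */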
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const_true_rtx)
	    return op1;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
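/* Illustrative sketch: on a little-endian target,
   (subreg:HI (const_int 0x12345678) 0) with SImode INNERMODE unpacks the
   constant into the byte array {0x78, 0x56, 0x34, 0x12}, selects the two
   bytes starting at offset 0, and repacks them as (const_int 0x5678).  */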
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
		abort ();

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < max_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      if (bitsize > elem_bitsize)
		abort ();
	      if (bitsize % value_bit != 0)
		abort ();

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    abort ();
	  break;

	default:
	  abort ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  if (elem_bitsize % value_bit != 0)
    abort ();
  if (elem_bitsize + value_start * value_bit > max_bitsize)
    abort ();

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  abort ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
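/* For example (a little-endian sketch): (subreg:QI (const_int 0x1234) 0)
   with SImode INNERMODE is folded by simplify_immed_subreg below into
   (const_int 0x34).  */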
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
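  /* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
     (subreg:QI (reg:SI R) 0) on a little-endian target (a sketch; the
     offset arithmetic below handles the endian cases).  */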
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
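      /* For instance (a sketch, little-endian byte numbering): the lowpart
	 (subreg:HI (zero_extend:SI (reg:HI R)) 0) returns (reg:HI R)
	 itself, since OUTERMODE equals the original mode of the
	 extension.  */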
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source provides.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
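/* Usage sketch (hypothetical operands): to refer to the low word of a
   DImode value X on a little-endian target, a caller might write
   simplify_gen_subreg (SImode, X, DImode, 0); the call either folds to
   a simpler rtx or falls back to building (subreg:SI X 0).  */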
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      temp = simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (temp == const0_rtx)
	    temp = CONST0_RTX (mode);
	  else
	    temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						 mode);
	}
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))
	    return const1_rtx;
	}
      break;

    case 'o':
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))