/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
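/* For example, HWI_SIGN_EXTEND (-5) is (HOST_WIDE_INT) -1 and
   HWI_SIGN_EXTEND (5) is (HOST_WIDE_INT) 0: the macro supplies the
   high word that pairs with LOW when a value is treated as a
   (low, high) double-word pair.  */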
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
                                                    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
                                        enum machine_mode, rtx,
                                        rtx, int));
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}
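/* A concrete case of the truncation above: negating the SImode
   CONST_INT 0x80000000 overflows the mode, and gen_int_mode wraps the
   result back into SImode so a canonical CONST_INT is still produced.  */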
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
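/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X through simplify_binary_operation, while an addition
   that does not fold comes back as a freshly built (plus:SI x y).  */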
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;

  addr = XEXP (x, 0);
  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
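/* The net effect is that a (mem (symbol_ref ...)) pointing into the
   constant pool is replaced by the pooled constant itself (for
   example a CONST_DOUBLE), converted with simplify_subreg when the
   access mode differs from the mode the constant was stored in.  */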
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
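/* Canonicalization example for the swap above: (eq (const_int 4) (reg))
   is rebuilt as (eq (reg) (const_int 4)); swap_condition keeps the
   comparison meaning while the constant moves to op1.  */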
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x, old, new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

        return
          simplify_gen_relational (code, mode,
                                   (op_mode != VOIDmode
                                    ? op_mode
                                    : GET_MODE (op0) != VOIDmode
                                    ? GET_MODE (op0)
                                    : GET_MODE (op1)),
                                   op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));
      else if (code == LO_SUM)
        {
          rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }

      return x;

    default:
      return x;
    }
  return x;
}
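/* As an illustration, replacing (reg R) with (const_int 2) in
   (plus:SI (reg R) (const_int 3)) recurses through the binary case
   above and simplify_gen_binary folds the result to (const_int 5).  */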
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
          else
            lv = exact_log2 (l1 & (-l1)) + 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
        {
        case FIX:           i = REAL_VALUE_FIX (d);           break;
        case UNSIGNED_FIX:  i = REAL_VALUE_UNSIGNED_FIX (d);  break;
        default:
          abort ();
        }
      return gen_int_mode (i, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */

      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the Vax).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}
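/* Examples of the constant folding above: (abs:SI (const_int -7))
   becomes (const_int 7) via the CONST_INT path, and
   (ffs:SI (const_int 8)) becomes (const_int 4), since the lowest set
   bit of 8 is bit 3 and FFS counts from 1.  */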
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
          && !MODE_HAS_INFINITIES (mode)
          && REAL_VALUES_EQUAL (f1, dconst0))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* .. fall through ...  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:   case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return gen_rtx_NEG (mode, XEXP (op0, 0));

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));
          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          break;
        case COMPARE:
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return gen_rtx_NEG (mode, op1);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return gen_rtx_NOT (mode, op1);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;
          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;
          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 1)));
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 0)));
            }
          break;
        case MULT:
          if (trueop1 == constm1_rtx)
            {
              tem = simplify_unary_operation (NEG, mode, op0, mode);

              return tem ? tem : gen_rtx_NEG (mode, op0);
            }

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return gen_rtx_NEG (mode, op0);
            }
          break;
        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          break;
        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return gen_rtx_NOT (mode, op0);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;
        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;
        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

          /* ... fall through ...  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  return gen_rtx_MULT (mode, op0,
                                       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
                }
            }
          break;
        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

          /* ... fall through ...  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;
        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* ... fall through ...  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;
        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
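/* Worked example of the folding above: for (ashiftrt:SI (const_int -16)
   (const_int 2)), arg0s is -16 and arg1 is 2, so val becomes -4; the
   manual sign extension guarantees that result even if the host's
   right shift of a negative value is logical rather than arithmetic.  */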
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              ops[n_ops].op = constm1_rtx;
              ops[n_ops++].neg = this_neg;
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = !this_neg;
              changed = 1;
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
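/* As a sketch of the algorithm: for (plus (minus (reg B) (reg A)) (reg A))
   the operand array becomes { +B, -A, +A }; the combination loop folds
   -A and +A to zero, the zero then folds away against B, and the result
   is rebuilt as just (reg B).  */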
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result.  */
  if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTU:
          case GTU:
          case LEU:
          case GEU:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
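/* For instance, comparing (const_int 4) with (const_int 7) in SImode
   takes the integer path above and sets op0lt, so (lt x y) folds to
   const_true_rtx while (ge x y) folds to const0_rtx.  */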
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
          && !HONOR_NANS (mode)
          && rtx_equal_p (XEXP (op0, 0), op1)
          && rtx_equal_p (XEXP (op0, 1), op2))
        return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
               && !HONOR_NANS (mode)
               && rtx_equal_p (XEXP (op0, 1), op1)
               && rtx_equal_p (XEXP (op0, 0), op2))
        return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;
          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const1_rtx)
            return op1;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
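/* For example, (if_then_else (const_int 1) A B) simplifies to A above,
   and with BITS_BIG_ENDIAN clear, (zero_extract (const_int 0x3c)
   (const_int 2) (const_int 2)) extracts the two bits starting at bit 2,
   giving (const_int 3).  */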
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
        {
          elt = CONST_VECTOR_ELT (op, offset);

          /* ?? We probably don't need this copy_rtx because constants
             can be shared.  ?? */

          return copy_rtx (elt);
        }
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
               && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
        {
          return (gen_rtx_CONST_VECTOR
                  (outermode,
                   gen_rtvec_v (GET_MODE_NUNITS (outermode),
                                &CONST_VECTOR_ELT (op, offset))));
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (GET_MODE_SIZE (outermode) % elt_size == 0))
        {
          /* This happens when the target register size is smaller than
             the vector mode, and we synthesize operations with vectors
             of elements that are smaller than the register size.  */
          HOST_WIDE_INT sum = 0, high = 0;
          unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
          unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
          unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
          int shift = BITS_PER_UNIT * elt_size;

          for (; n_elts--; i += step)
            {
              elt = CONST_VECTOR_ELT (op, i);
              if (GET_CODE (elt) == CONST_DOUBLE
                  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
                {
                  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
                                            elt);
                  if (! elt)
                    return NULL_RTX;
                }
              if (GET_CODE (elt) != CONST_INT)
                return NULL_RTX;
              high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
              sum = (sum << shift) + INTVAL (elt);
            }
          if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
            return GEN_INT (trunc_int_for_mode (sum, outermode));
          else if (GET_MODE_BITSIZE (outermode) == 2* HOST_BITS_PER_WIDE_INT)
            return immed_double_const (high, sum, outermode);
          else
            return NULL_RTX;
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (elt_size % GET_MODE_SIZE (outermode) == 0))
        {
          enum machine_mode new_mode
            = int_mode_for_mode (GET_MODE_INNER (innermode));
          int subbyte = byte % elt_size;

          op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
          if (! op)
            return NULL_RTX;
          return simplify_subreg (outermode, op, new_mode, subbyte);
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
        /* This shouldn't happen, but let's not do anything stupid.  */
        return NULL_RTX;
    }
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
          || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
        {
          /* Construct a CONST_VECTOR from individual subregs.  */
          enum machine_mode submode = GET_MODE_INNER (outermode);
          int subsize = GET_MODE_UNIT_SIZE (outermode);
          int i, elts = GET_MODE_NUNITS (outermode);
          rtvec v = rtvec_alloc (elts);
          rtx elt;

          for (i = 0; i < elts; i++, byte += subsize)
            {
              /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
              /* ??? It would be nice if we could actually make such subregs
                 on targets that allow such relocations.  */
              if (byte >= GET_MODE_UNIT_SIZE (innermode))
                elt = CONST0_RTX (submode);
              else
                elt = simplify_subreg (submode, op, innermode, byte);
              if (! elt)
                return NULL_RTX;
              RTVEC_ELT (v, i) = elt;
            }

          return gen_rtx_CONST_VECTOR (outermode, v);
        }
      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
          && GET_CODE (op) != CONST_VECTOR)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* A similar comment as above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      if (GET_MODE_CLASS (outermode) != MODE_INT
          && GET_MODE_CLASS (outermode) != MODE_CC)
        {
          enum machine_mode new_mode = int_mode_for_mode (outermode);

          if (new_mode != innermode || byte != 0)
            {
              op = simplify_subreg (new_mode, op, innermode, byte);
              if (! op)
                return NULL_RTX;
              return simplify_subreg (outermode, op, new_mode, 0);
            }
        }
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return NULL_RTX;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = !part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
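
          /* Fall through: the word picked from the CONST_DOUBLE is now
             extracted by the CONST_INT code below.  */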
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return NULL_RTX;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
            }
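
          /* At this point OFFSET is a little-endian style bit offset
             into VAL.  For instance, on a 64-bit big-endian target the
             low SImode word of a DImode constant lives at byte 4; the
             adjustment above turns its bit offset 32 into 0, so the
             numerically low 32 bits are extracted below.  */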
          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;

          val >>= offset;
          if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
            val = trunc_int_for_mode (val, outermode);
          return GEN_INT (val);

        default:
          break;
        }
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  The irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0; on big-endian machines the
         value should really be negative.  For a moment, undo this
         exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));

          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
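
      /* For instance, with UNITS_PER_WORD == 4 on a WORDS_BIG_ENDIAN
         target, a paradoxical (subreg:DI (x:SI) 0) conceptually starts
         one word before X, so the adjustment above adds -4 to
         FINAL_OFFSET (the modes are only illustrative).  */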
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));

          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }
      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
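
  /* For example, on a little-endian target the block above collapses
     (subreg:QI (subreg:HI (reg:SI) 0) 0) into (subreg:QI (reg:SI) 0)
     when the inner subreg cannot itself be simplified.  */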
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), outermode, innermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
                                           0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG (outermode, final_regno);

          /* Propagate the original regno.  We don't have any way to
             specify the offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
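
  /* As an illustration, on a target where DImode occupies two word-sized
     hard registers, the code above can turn (subreg:SI (reg:DI 0) 4),
     with UNITS_PER_WORD == 4, into (reg:SI 1), provided register 1 can
     hold SImode; the register numbers here are purely illustrative.  */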
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
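
  /* E.g. a QImode subreg at byte 3 of an SImode MEM becomes a QImode MEM
     whose address has simply been offset by 3 bytes.  */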
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the
         relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass-dependent state to be provided to these
           routines and add simplifications based on that pass-dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        {
          rtx tem;

          tem = XEXP (x, 0);
          XEXP (x, 0) = XEXP (x, 1);
          XEXP (x, 1) = tem;
          return simplify_binary_operation (code, mode,
                                            XEXP (x, 0), XEXP (x, 1));
        }
      /* Fall through.  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),