/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
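/* For example, HWI_SIGN_EXTEND (5) is 0 and HWI_SIGN_EXTEND (-2) is -1:
   the high word of a (low, high) pair is just the sign bit of LOW
   replicated across a whole word.  */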
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */
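/* For instance, a commutative (plus (const_int 4) (reg R)) is reordered to
   (plus (reg R) (const_int 4)) before folding is tried, and a fully
   constant (plus (const_int 4) (const_int 2)) folds to (const_int 6).  */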
rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
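/* For instance, a (mem (symbol_ref)) whose address points at a DFmode
   constant-pool entry is replaced by that entry's CONST_DOUBLE, so the
   callers below can fold arithmetic on it directly.  */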
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc...  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
	{
	  code = new;
	  mode = cmp_mode;
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	}
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */
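/* For instance, replacing (reg A) by (const_int 0) inside
   (plus (reg A) (reg B)) does not merely substitute: the resulting PLUS is
   re-simplified, yielding just (reg B).  */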
rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
	rtx temp = simplify_gen_relational (code, mode,
					    (op_mode != VOIDmode
					     ? op_mode
					     : GET_MODE (op0) != VOIDmode
					       ? GET_MODE (op0)
					       : GET_MODE (op1)),
					    op0, op1);
#ifdef FLOAT_STORE_FLAG_VALUE
	if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	  {
	    if (temp == const0_rtx)
	      temp = CONST0_RTX (mode);
	    else if (temp == const_true_rtx)
	      temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						   mode);
	  }
#endif
	return temp;
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));
      else if (code == LO_SUM)
	{
	  rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}
      return x;

    default:
      return x;
    }
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */
  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
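	  /* Example: for arg0 == 0x28 (binary 101000), arg0 & -arg0 isolates
	     the lowest set bit (0x8), exact_log2 of that is 3, so val
	     becomes 4, the 1-based bit number that ffs would report.  */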
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	default:
	  return 0;
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 == 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = GET_MODE_BITSIZE (mode);
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	    }
	  else
	    lv = exact_log2 (l1 & -l1);
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */
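	  /* For example, (plus (mult X (const_int 3)) X) becomes
	     (mult X (const_int 4)), and (plus (ashift X (const_int 2)) X)
	     becomes (mult X (const_int 5)).  */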
	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */
	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return gen_rtx_NEG (mode, op0);
	    }
	  break;
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return gen_rtx_NOT (mode, op0);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
		}
	    }
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;
	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;
	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  break;
	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    if (!VECTOR_MODE_P (mode)
		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    != GET_MODE_SIZE (mode)))
	      abort ();

	    if ((VECTOR_MODE_P (op0_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op0_mode)))
		|| (!VECTOR_MODE_P (op0_mode)
		    && GET_MODE_INNER (mode) != op0_mode))
	      abort ();

	    if ((VECTOR_MODE_P (op1_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op1_mode)))
		|| (!VECTOR_MODE_P (op1_mode)
		    && GET_MODE_INNER (mode) != op1_mode))
	      abort ();

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	  }
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
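  /* Example: in QImode (width 8), a trueop0 of (const_int -1) gives
     arg0 = 0xff after masking (zero-extended) and arg0s = -1 once the sign
     bit has been propagated (sign-extended).  */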
  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
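/* For example, (plus (plus (reg A) (const_int 4)) (minus (reg B) (reg A)))
   is flattened into the operand list {A, +4, B, -A}; the A and -A entries
   cancel, and the result is rebuilt as (plus (reg B) (const_int 4)).  */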
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);
1997 if (n_ops
<= 2 && !force
)
2000 /* Count the number of CONSTs we didn't split above. */
2001 for (i
= 0; i
< n_ops
; i
++)
2002 if (GET_CODE (ops
[i
].op
) == CONST
)
2005 /* Now simplify each pair of operands until nothing changes. The first
2006 time through just simplify constants against each other. */
2013 for (i
= 0; i
< n_ops
- 1; i
++)
2014 for (j
= i
+ 1; j
< n_ops
; j
++)
2016 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
2017 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
2019 if (lhs
!= 0 && rhs
!= 0
2020 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
2022 enum rtx_code ncode
= PLUS
;
2028 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2030 else if (swap_commutative_operands_p (lhs
, rhs
))
2031 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2033 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
2035 /* Reject "simplifications" that just wrap the two
2036 arguments in a CONST. Failure to do so can result
2037 in infinite recursion with simplify_binary_operation
2038 when it calls us to simplify CONST operations. */
2040 && ! (GET_CODE (tem
) == CONST
2041 && GET_CODE (XEXP (tem
, 0)) == ncode
2042 && XEXP (XEXP (tem
, 0), 0) == lhs
2043 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2044 /* Don't allow -x + -1 -> ~x simplifications in the
2045 first pass. This allows us the chance to combine
2046 the -1 with other constants. */
2048 && GET_CODE (tem
) == NOT
2049 && XEXP (tem
, 0) == rhs
))
2052 if (GET_CODE (tem
) == NEG
)
2053 tem
= XEXP (tem
, 0), lneg
= !lneg
;
2054 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
2055 tem
= neg_const_int (mode
, tem
), lneg
= 0;
2059 ops
[j
].op
= NULL_RTX
;
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
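/* For example, comparing (const_int 128) with (const_int -128) must be done
   in the operands' real mode: in QImode both are the bit pattern 0x80 and
   compare equal, whereas in "infinite precision" they would differ.  */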
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case UNORDERED:
      return const0_rtx;
    case ORDERED:
      return const_true_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
2435 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
2436 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
2439 unsigned int width
= GET_MODE_BITSIZE (mode
);
2441 /* VOIDmode means "infinite" precision. */
2443 width
= HOST_BITS_PER_WIDE_INT
;
2449 if (GET_CODE (op0
) == CONST_INT
2450 && GET_CODE (op1
) == CONST_INT
2451 && GET_CODE (op2
) == CONST_INT
2452 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
2453 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
2455 /* Extracting a bit-field from a constant */
2456 HOST_WIDE_INT val
= INTVAL (op0
);
2458 if (BITS_BIG_ENDIAN
)
2459 val
>>= (GET_MODE_BITSIZE (op0_mode
)
2460 - INTVAL (op2
) - INTVAL (op1
));
2462 val
>>= INTVAL (op2
);
2464 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
2466 /* First zero-extend. */
2467 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
2468 /* If desired, propagate sign bit. */
2469 if (code
== SIGN_EXTRACT
2470 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
2471 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
2474 /* Clear the bits that don't belong in our mode,
2475 unless they and our sign bit are all one.
2476 So we get either a reasonable negative value or a reasonable
2477 unsigned value for this mode. */
2478 if (width
< HOST_BITS_PER_WIDE_INT
2479 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
2480 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
2481 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2483 return GEN_INT (val
);
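      /* Worked example for the bit-field extraction above (illustrative
	 only): extracting a 4-bit field starting at bit 4 from
	 (const_int 0xab) on a !BITS_BIG_ENDIAN target shifts the value
	 right by 4 and masks it with 0xf, so ZERO_EXTRACT yields
	 (const_int 10) while SIGN_EXTRACT sees the field's top bit set
	 and sign-extends it to (const_int -6).  */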

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op2;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
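
/* Illustrative sketch, not from the original source: for a V4SI vec_merge
   whose vector operands are both CONST_VECTORs and whose selector op2 is
   (const_int 5), bits 0 and 2 are set, so elements 0 and 2 come from op0
   and elements 1 and 3 from op1 in the CONST_VECTOR built above; a
   selector of 0 returns op1 outright and a selector of 0xf returns op0.  */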

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
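  /* Illustrative sketch, not part of the original source: a request such as
     (subreg:SF (const_vector:V4SF [a b c d]) 4) passes the sanity checks
     above and is handled by the CONST_VECTOR code below, which simply
     returns element 1 of the vector because byte 4 divided by the 4-byte
     element size selects the second element.  */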

  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
	{
	  elt = CONST_VECTOR_ELT (op, offset);

	  /* ?? We probably don't need this copy_rtx because constants
	     can be shared.  ?? */

	  return copy_rtx (elt);
	}
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
	       && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
	{
	  return (gen_rtx_CONST_VECTOR
		  (outermode,
		   gen_rtvec_v (GET_MODE_NUNITS (outermode),
				&CONST_VECTOR_ELT (op, offset))));
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (GET_MODE_SIZE (outermode) % elt_size == 0))
	{
	  /* This happens when the target register size is smaller than
	     the vector mode, and we synthesize operations with vectors
	     of elements that are smaller than the register size.  */
	  HOST_WIDE_INT sum = 0, high = 0;
	  unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
	  unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
	  unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
	  int shift = BITS_PER_UNIT * elt_size;
	  unsigned HOST_WIDE_INT unit_mask;

	  unit_mask = (unsigned HOST_WIDE_INT) -1
	    >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);

	  for (; n_elts--; i += step)
	    {
	      elt = CONST_VECTOR_ELT (op, i);
	      if (GET_CODE (elt) == CONST_DOUBLE
		  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
		{
		  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
					    elt);
		  if (! elt)
		    return NULL_RTX;
		}
	      if (GET_CODE (elt) != CONST_INT)
		return NULL_RTX;
	      /* Avoid overflow.  */
	      if (high >> (HOST_BITS_PER_WIDE_INT - shift))
		return NULL_RTX;
	      high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
	      sum = (sum << shift) + (INTVAL (elt) & unit_mask);
	    }
	  if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
	    return GEN_INT (trunc_int_for_mode (sum, outermode));
	  else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
	    return immed_double_const (sum, high, outermode);
	  else
	    return NULL_RTX;
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (elt_size % GET_MODE_SIZE (outermode) == 0))
	{
	  enum machine_mode new_mode
	    = int_mode_for_mode (GET_MODE_INNER (innermode));
	  int subbyte = byte % elt_size;

	  op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
	  if (! op)
	    return NULL_RTX;
	  return simplify_subreg (outermode, op, new_mode, subbyte);
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
	/* This shouldn't happen, but let's not do anything stupid.  */
	return NULL_RTX;
    }
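  /* Worked example for the vector-constant handling above (illustrative
     only): an SImode subreg at byte 0 of the V2HI constant
     (const_vector [0x1234 0x5678]) is packed two 16-bit elements at a
     time; on a little-endian target the loop visits element 1 first, so
     the accumulated value is 0x56781234 and the result is
     GEN_INT (0x56781234), with element 0 in the low half.  */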

  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (VECTOR_MODE_P (outermode))
	{
	  /* Construct a CONST_VECTOR from individual subregs.  */
	  enum machine_mode submode = GET_MODE_INNER (outermode);
	  int subsize = GET_MODE_UNIT_SIZE (outermode);
	  int i, elts = GET_MODE_NUNITS (outermode);
	  rtvec v = rtvec_alloc (elts);
	  rtx elt;

	  for (i = 0; i < elts; i++, byte += subsize)
	    {
	      /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
	      /* ??? It would be nice if we could actually make such subregs
		 on targets that allow such relocations.  */
	      if (byte >= GET_MODE_SIZE (innermode))
		elt = CONST0_RTX (submode);
	      else
		elt = simplify_subreg (submode, op, innermode, byte);
	      if (! elt)
		return NULL_RTX;
	      RTVEC_ELT (v, i) = elt;
	    }
	  return gen_rtx_CONST_VECTOR (outermode, v);
	}

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
	  && GET_CODE (op) != CONST_VECTOR)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* Similar comment as above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      if (GET_MODE_CLASS (outermode) != MODE_INT
	  && GET_MODE_CLASS (outermode) != MODE_CC)
	{
	  enum machine_mode new_mode = int_mode_for_mode (outermode);

	  if (new_mode != innermode || byte != 0)
	    {
	      op = simplify_subreg (new_mode, op, innermode, byte);
	      if (! op)
		return NULL_RTX;
	      return simplify_subreg (outermode, op, new_mode, 0);
	    }
	}

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = ! part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* FALLTHROUGH */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }

	default:
	  break;
	}
    }
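  /* Worked example for the constant extraction above (illustrative only):
     (subreg:QI (const_int 0x12345678) 2) on a little-endian target gives
     offset == 16, so the value is shifted right by 16 bits and truncated
     to QImode, yielding (const_int 0x34), i.e. byte 2 of the constant.  */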

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
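  /* Illustrative sketch, not from the original source: given
     (subreg:QI (subreg:HI (reg:SI x) 0) 0), final_offset is 0, the subreg
     is not paradoxical, and the handling above collapses the pair either
     via the recursive call or by building (subreg:QI (reg:SI x) 0)
     directly, so the mode is changed only once.  */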

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
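  /* Illustrative sketch, not from the original source: when op is
     (mem:DI addr) with an address that is not mode-dependent,
     (subreg:SI (mem:DI addr) 4) is rewritten here as
     adjust_address_nv (op, SImode, 4), i.e. a narrower MEM at a 4-byte
     offset, instead of keeping the SUBREG around.  */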

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx new;
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
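
/* Usage sketch, illustrative rather than part of the original source:
   a caller that wants the low word of a DImode value typically writes

     low = simplify_gen_subreg (SImode, x, DImode,
				subreg_lowpart_offset (SImode, DImode));

   the result is a folded constant, hard REG, or adjusted MEM when
   simplify_subreg succeeds, and a plain (subreg:SI ...) otherwise.  */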

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* ... fall through ...  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      temp = simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (temp == const0_rtx)
	    temp = CONST0_RTX (mode);
	  else
	    temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						 mode);
	}
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))
	    return const1_rtx;
	}
      break;

    case 'o':
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))