/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
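/* For illustration: given a low word, HWI_SIGN_EXTEND supplies the high
   word of the (low, high) pair described above.  With a 64-bit
   HOST_WIDE_INT (values are examples only):

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  ==>  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT)  5)  ==>  (HOST_WIDE_INT)  0

   so the pair (-5, -1) still represents -5 in double-word arithmetic.  */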
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
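/* For illustration: the truncation matters for the most negative value of
   a narrow mode.  In QImode (8 bits), negating -128 mathematically gives
   128, which does not fit; gen_int_mode truncates it back to -128:

     neg_const_int (QImode, GEN_INT (-128))  ==>  (const_int -128)

   (Values are examples only.)  */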
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
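/* A minimal usage sketch for simplify_gen_binary (kept out of the build
   with #if 0; the register number and mode are arbitrary examples, not
   part of this file's interface).  Constant operands are folded while a
   non-constant operand is left symbolic.  */
#if 0
static rtx
example_fold_plus (void)
{
  rtx reg = gen_rtx_REG (SImode, 100);	/* a pseudo register, for example */
  rtx sum = simplify_gen_binary (PLUS, SImode, reg, GEN_INT (4));

  /* Adding another constant reassociates through simplify_plus_minus,
     yielding (plus (reg:SI 100) (const_int 12)).  */
  return simplify_gen_binary (PLUS, SImode, sum, GEN_INT (8));
}
#endif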
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
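/* A minimal usage sketch (illustrative only, kept out of the build with
   #if 0; the mode arguments are examples).  A comparison that cannot be
   folded outright is returned canonicalized with the constant second.  */
#if 0
static rtx
example_relational (rtx reg)
{
  /* (eq (const_int 0) (reg)) comes back as (eq:SI (reg) (const_int 0))
     when it cannot be simplified to a constant truth value.  */
  return simplify_gen_relational (EQ, SImode, GET_MODE (reg),
				  const0_rtx, reg);
}
#endif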
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
	rtx temp = simplify_gen_relational (code, mode,
					    (op_mode != VOIDmode
					     ? op_mode
					     : GET_MODE (op0) != VOIDmode
					       ? GET_MODE (op0)
					       : GET_MODE (op1)),
					    op0, op1);
#ifdef FLOAT_STORE_FLAG_VALUE
	if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	  {
	    if (temp == const0_rtx)
	      temp = CONST0_RTX (mode);
	    else if (temp == const_true_rtx)
	      temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						   mode);
	  }
#endif
	return temp;
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));
      else if (code == LO_SUM)
	{
	  rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}

      return x;

    default:
      return x;
    }
  return x;
}
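/* A minimal usage sketch (illustrative only, #if 0'd out): substituting a
   constant for a register and letting the arithmetic fold.  EXPR and REG
   are hypothetical operands supplied by the caller.  */
#if 0
static rtx
example_replace (rtx expr, rtx reg)
{
  /* If EXPR is (plus:SI (reg) (const_int 3)), replacing REG with
     (const_int 4) simplifies the whole expression to (const_int 7).  */
  return simplify_replace_rtx (expr, reg, GEN_INT (4));
}
#endif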
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);
  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	default:
	  return 0;
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 == 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = GET_MODE_BITSIZE (mode);
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	    }
	  else
	    lv = exact_log2 (l1 & -l1);
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;

      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
	 || GET_CODE (op) == CONST_DOUBLE;
}
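/* For illustration: (const_int 4) and a CONST_DOUBLE qualify directly; a
   MEM referring to a constant-pool entry qualifies because
   avoid_constant_pool_reference replaces it with the pooled constant; a
   REG or a symbolic CONST does not qualify.  (Examples only.)  */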
/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
	return 0;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
					 XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
	return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
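/* For illustration, with CODE = PLUS and MODE = SImode (examples only):

     (plus (plus x (const_int 1)) (const_int 2))
       -> (plus x (const_int 3))				rule 1
     (plus (plus x (const_int 1)) (plus y (const_int 2)))
       -> (plus (plus x y) (const_int 3))			rule 2
     (plus (plus x (const_int 1)) y)
       -> (plus (plus x y) (const_int 1))			rule 3  */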
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }
  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case COMPARE:
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

	  /* Fall through....  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));

	  /* Fall through....  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;
	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;

	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    if (!VECTOR_MODE_P (mode)
		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    != GET_MODE_SIZE (mode)))
	      abort ();

	    if ((VECTOR_MODE_P (op0_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op0_mode)))
		|| (!VECTOR_MODE_P (op0_mode)
		    && GET_MODE_INNER (mode) != op0_mode))
	      abort ();

	    if ((VECTOR_MODE_P (op1_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op1_mode)))
		|| (!VECTOR_MODE_P (op1_mode)
		    && GET_MODE_INNER (mode) != op1_mode))
	      abort ();

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	  }
	  return 0;

	default:
	  break;
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
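/* A worked example of the constant path above (values illustrative only):
   for (ashift:SI (const_int 3) (const_int 4)), width is 32, arg0 = 3 and
   arg1 = 4, so the ASHIFT case computes val = 3 << 4 = 48 and the result
   is (const_int 48).  In QImode, (plus (const_int 200) (const_int 100))
   wraps: the sign-extended operands give val = -56 + 100 = 44, which is
   300 reduced modulo 256, so the result is (const_int 44).  */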
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
, enum machine_mode mode
, rtx op0
,
2013 struct simplify_plus_minus_op_data ops
[8];
2015 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
;
2016 int first
, negate
, changed
;
2019 memset (ops
, 0, sizeof ops
);
2021 /* Set up the two operands and then expand them until nothing has been
2022 changed. If we run out of room in our array, give up; this should
2023 almost never happen. */
2028 ops
[1].neg
= (code
== MINUS
);
2034 for (i
= 0; i
< n_ops
; i
++)
2036 rtx this_op
= ops
[i
].op
;
2037 int this_neg
= ops
[i
].neg
;
2038 enum rtx_code this_code
= GET_CODE (this_op
);
2047 ops
[n_ops
].op
= XEXP (this_op
, 1);
2048 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
2051 ops
[i
].op
= XEXP (this_op
, 0);
2057 ops
[i
].op
= XEXP (this_op
, 0);
2058 ops
[i
].neg
= ! this_neg
;
2064 && GET_CODE (XEXP (this_op
, 0)) == PLUS
2065 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
2066 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
2068 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
2069 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
2070 ops
[n_ops
].neg
= this_neg
;
2078 /* ~a -> (-a - 1) */
2081 ops
[n_ops
].op
= constm1_rtx
;
2082 ops
[n_ops
++].neg
= this_neg
;
2083 ops
[i
].op
= XEXP (this_op
, 0);
2084 ops
[i
].neg
= !this_neg
;
2092 ops
[i
].op
= neg_const_int (mode
, this_op
);
2105 /* If we only have two operands, we can't do anything. */
2106 if (n_ops
<= 2 && !force
)
2109 /* Count the number of CONSTs we didn't split above. */
2110 for (i
= 0; i
< n_ops
; i
++)
2111 if (GET_CODE (ops
[i
].op
) == CONST
)
2114 /* Now simplify each pair of operands until nothing changes. The first
2115 time through just simplify constants against each other. */
2122 for (i
= 0; i
< n_ops
- 1; i
++)
2123 for (j
= i
+ 1; j
< n_ops
; j
++)
2125 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
2126 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
2128 if (lhs
!= 0 && rhs
!= 0
2129 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
2131 enum rtx_code ncode
= PLUS
;
2137 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2139 else if (swap_commutative_operands_p (lhs
, rhs
))
2140 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2142 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
2144 /* Reject "simplifications" that just wrap the two
2145 arguments in a CONST. Failure to do so can result
2146 in infinite recursion with simplify_binary_operation
2147 when it calls us to simplify CONST operations. */
2149 && ! (GET_CODE (tem
) == CONST
2150 && GET_CODE (XEXP (tem
, 0)) == ncode
2151 && XEXP (XEXP (tem
, 0), 0) == lhs
2152 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2153 /* Don't allow -x + -1 -> ~x simplifications in the
2154 first pass. This allows us the chance to combine
2155 the -1 with other constants. */
2157 && GET_CODE (tem
) == NOT
2158 && XEXP (tem
, 0) == rhs
))
2161 if (GET_CODE (tem
) == NEG
)
2162 tem
= XEXP (tem
, 0), lneg
= !lneg
;
2163 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
2164 tem
= neg_const_int (mode
, tem
), lneg
= 0;
2168 ops
[j
].op
= NULL_RTX
;
2178 /* Pack all the operands to the lower-numbered entries. */
2179 for (i
= 0, j
= 0; j
< n_ops
; j
++)
2184 /* Sort the operations based on swap_commutative_operands_p. */
2185 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
2187 /* We suppressed creation of trivial CONST expressions in the
2188 combination loop to avoid recursion. Create one manually now.
2189 The combination loop should have ensured that there is exactly
2190 one CONST_INT, and the sort will have ensured that it is last
2191 in the array and that any other constant will be next-to-last. */
2194 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
2195 && CONSTANT_P (ops
[n_ops
- 2].op
))
2197 rtx value
= ops
[n_ops
- 1].op
;
2198 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
2199 value
= neg_const_int (mode
, value
);
2200 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
2204 /* Count the number of CONSTs that we generated. */
2206 for (i
= 0; i
< n_ops
; i
++)
2207 if (GET_CODE (ops
[i
].op
) == CONST
)
2210 /* Give up if we didn't reduce the number of operands we had. Make
2211 sure we count a CONST as two operands. If we have the same
2212 number of operands, but have made more CONSTs than before, this
2213 is also an improvement, so accept it. */
2215 && (n_ops
+ n_consts
> input_ops
2216 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
2219 /* Put a non-negated operand first. If there aren't any, make all
2220 operands positive and negate the whole thing later. */
2223 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
2227 for (i
= 0; i
< n_ops
; i
++)
2239 /* Now make the result by performing the requested operations. */
2241 for (i
= 1; i
< n_ops
; i
++)
2242 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
2243 mode
, result
, ops
[i
].op
);
2245 return negate
? gen_rtx_NEG (mode
, result
) : result
;
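/* For illustration (operand values are examples only): simplifying
   (minus (plus x (const_int 5)) (plus x (const_int 2))) expands the
   operand array to { x, +5, -x, -2 }, cancels x against -x, combines the
   two constants, and rebuilds the result as (const_int 3).  */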
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
2361 /* Otherwise, see if the operands are both integers. */
2362 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
2363 && (GET_CODE (trueop0
) == CONST_DOUBLE
2364 || GET_CODE (trueop0
) == CONST_INT
)
2365 && (GET_CODE (trueop1
) == CONST_DOUBLE
2366 || GET_CODE (trueop1
) == CONST_INT
))
2368 int width
= GET_MODE_BITSIZE (mode
);
2369 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
2370 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
2372 /* Get the two words comprising each integer constant. */
2373 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
2375 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
2376 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
2380 l0u
= l0s
= INTVAL (trueop0
);
2381 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
2384 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
2386 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
2387 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
2391 l1u
= l1s
= INTVAL (trueop1
);
2392 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
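
      /* With width == 8, for example, a low word of 0xff now has
         l0s == -1 but l0u == 255, so the signed and unsigned orderings
         computed below can disagree: 0xff is less than 1 as a signed
         value but greater than 1 as an unsigned one.  */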

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
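
  /* In the SIGN_EXTRACT/ZERO_EXTRACT cases below, for example, extracting
     a 4-bit field at bit position 4 of the constant 0xab (with
     BITS_BIG_ENDIAN clear) yields 0xa for ZERO_EXTRACT, while SIGN_EXTRACT
     sees bit 3 of the field set and sign-extends the result to -6.  */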
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
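
    /* In the IF_THEN_ELSE case below, on a target where STORE_FLAG_VALUE
       is 1, (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses
       to the comparison (lt x y) itself via the constant-operand path.  */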
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
          && !HONOR_NANS (mode)
          && rtx_equal_p (XEXP (op0, 0), op1)
          && rtx_equal_p (XEXP (op0, 1), op2))
        return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
               && !HONOR_NANS (mode)
               && rtx_equal_p (XEXP (op0, 1), op1)
               && rtx_equal_p (XEXP (op0, 0), op2))
        return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const1_rtx)
            return op1;
          else if (temp)
            return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;
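
    /* In the VEC_MERGE case below, a constant selector chooses element by
       element between OP0 and OP1: with four elements and a selector of
       0b0101, elements 0 and 2 are taken from OP0 and elements 1 and 3
       from OP1, since a set bit selects the corresponding element of OP0.  */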
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
          || GET_MODE (op1) != mode
          || !VECTOR_MODE_P (mode))
        abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
        {
          elt = CONST_VECTOR_ELT (op, offset);

          /* ?? We probably don't need this copy_rtx because constants
             can be shared.  ?? */

          return copy_rtx (elt);
        }
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
               && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
        {
          return (gen_rtx_CONST_VECTOR
                  (outermode,
                   gen_rtvec_v (GET_MODE_NUNITS (outermode),
                                &CONST_VECTOR_ELT (op, offset))));
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (GET_MODE_SIZE (outermode) % elt_size == 0))
        {
          /* This happens when the target register size is smaller than
             the vector mode, and we synthesize operations with vectors
             of elements that are smaller than the register size.  */
          HOST_WIDE_INT sum = 0, high = 0;
          unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
          unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
          unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
          int shift = BITS_PER_UNIT * elt_size;
          unsigned HOST_WIDE_INT unit_mask;

          unit_mask = (unsigned HOST_WIDE_INT) -1
            >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);

          for (; n_elts--; i += step)
            {
              elt = CONST_VECTOR_ELT (op, i);
              if (GET_CODE (elt) == CONST_DOUBLE
                  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
                {
                  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
                                            elt);
                  if (! elt)
                    return NULL_RTX;
                }
              if (GET_CODE (elt) != CONST_INT)
                return NULL_RTX;
              /* Avoid overflow.  */
              if (high >> (HOST_BITS_PER_WIDE_INT - shift))
                return NULL_RTX;
              high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
              sum = (sum << shift) + (INTVAL (elt) & unit_mask);
            }
          if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
            return GEN_INT (trunc_int_for_mode (sum, outermode));
          else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
            return immed_double_const (sum, high, outermode);
          else
            return NULL_RTX;
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT
               && (elt_size % GET_MODE_SIZE (outermode) == 0))
        {
          enum machine_mode new_mode
            = int_mode_for_mode (GET_MODE_INNER (innermode));
          int subbyte = byte % elt_size;

          op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
          if (! op)
            return NULL_RTX;
          return simplify_subreg (outermode, op, new_mode, subbyte);
        }
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
        /* This shouldn't happen, but let's not do anything stupid.  */
        return NULL_RTX;
    }

  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (VECTOR_MODE_P (outermode))
        {
          /* Construct a CONST_VECTOR from individual subregs.  */
          enum machine_mode submode = GET_MODE_INNER (outermode);
          int subsize = GET_MODE_UNIT_SIZE (outermode);
          int i, elts = GET_MODE_NUNITS (outermode);
          rtvec v = rtvec_alloc (elts);
          rtx elt;

          for (i = 0; i < elts; i++, byte += subsize)
            {
              /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
              /* ??? It would be nice if we could actually make such subregs
                 on targets that allow such relocations.  */
              if (byte >= GET_MODE_SIZE (innermode))
                elt = CONST0_RTX (submode);
              else
                elt = simplify_subreg (submode, op, innermode, byte);
              if (! elt)
                return NULL_RTX;
              RTVEC_ELT (v, i) = elt;
            }

          return gen_rtx_CONST_VECTOR (outermode, v);
        }

      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
          && GET_CODE (op) != CONST_VECTOR)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* The same comment as above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      if (GET_MODE_CLASS (outermode) != MODE_INT
          && GET_MODE_CLASS (outermode) != MODE_CC)
        {
          enum machine_mode new_mode = int_mode_for_mode (outermode);

          if (new_mode != innermode || byte != 0)
            {
              op = simplify_subreg (new_mode, op, innermode, byte);
              if (! op)
                return NULL_RTX;
              return simplify_subreg (outermode, op, new_mode, 0);
            }
        }

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return NULL_RTX;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = !part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

          /* Fall through.  */
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing of non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return NULL_RTX;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
            }

          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
          else
            {
              val >>= offset;
              if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
                val = trunc_int_for_mode (val, outermode);
              return GEN_INT (val);
            }

        default:
          break;
        }
    }
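
  /* As an example of the constant handling above, taking byte 3 of the
     SImode constant 0x12345678 as a QImode subreg yields 0x12 on a
     little-endian target but 0x78 on a big-endian one, where byte 3 is
     the least significant byte.  */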

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
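
  /* The recursion above folds, e.g., (subreg:QI (subreg:HI (reg:SI x) 2) 1)
     on a little-endian target into (subreg:QI (reg:SI x) 3), combining
     the two byte offsets into one.  */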

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
                                           0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
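
/* As an example of the CONCAT handling above, (subreg:SF (concat:SC RE IM) 4)
   resolves directly to IM, since byte 4 lies past the 4-byte real part.  */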

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
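
/* Callers of simplify_gen_subreg typically compute BYTE with
   subreg_lowpart_offset; for example,
   simplify_gen_subreg (SImode, x, DImode,
                        subreg_lowpart_offset (SImode, DImode))
   refers to the low word of X on either endianness.  */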

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      temp = simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          if (temp == const0_rtx)
            temp = CONST0_RTX (mode);
          else
            temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
                                                 mode);
        }
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
        {
          if (CONSTANT_P (XEXP (x, 0)))
            return const1_rtx;
        }
      break;

    case 'o':
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))