/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
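
/* Illustrative example (added comment, not part of the original file):
   assuming 32-bit HOST_WIDE_INTs, the (low, high) pair for -5 is built as

     unsigned HOST_WIDE_INT low  = (unsigned HOST_WIDE_INT) -5;   /* 0xfffffffb */
     HOST_WIDE_INT          high = HWI_SIGN_EXTEND (low);         /* -1 */

   while a non-negative low word such as 7 gives HWI_SIGN_EXTEND (7) == 0,
   so that pair stays (7, 0).  */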
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
						    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code, enum machine_mode, rtx,
					rtx, int));
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
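
/* Illustrative usage sketch (added comment, not part of the original file):
   a caller that wants a canonicalized, possibly folded sum would write

     rtx sum = simplify_gen_binary (PLUS, SImode, reg, GEN_INT (4));

   If REG is itself a CONST_INT the result is a folded constant; otherwise
   it is (plus:SI reg (const_int 4)) with the operands already in
   canonical order.  */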
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc...  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
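
/* Illustrative usage sketch (added comment, not part of the original file):
   callers use this to peer through a constant-pool load before testing for
   a constant, e.g.

     rtx trueop = avoid_constant_pool_reference (op);
     if (GET_CODE (trueop) == CONST_DOUBLE)
       ... fold using the pool entry's value ...

   while still emitting OP itself when no fold turns out to be possible.  */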
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
        return gen_rtx_fmt_ee (new, mode, XEXP (op0, 0), XEXP (op0, 1));
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

        return
          simplify_gen_relational (code, mode,
                                   (op_mode != VOIDmode
                                    ? op_mode
                                    : GET_MODE (op0) != VOIDmode
                                    ? GET_MODE (op0)
                                    : GET_MODE (op1)),
                                   op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));
      else if (code == LO_SUM)
        {
          rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      return x;

    default:
      return x;
    }
}
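
/* Illustrative usage sketch (added comment, not part of the original file):
   substituting a known value and re-simplifying in one step, e.g.

     rtx x = simplify_replace_rtx (gen_rtx_PLUS (SImode, r1, r1),
                                   r1, GEN_INT (8));

   rewrites each PLUS operand and folds the rebuilt expression, so X ends
   up as (const_int 16).  Here R1 stands for any pseudo-register rtx.  */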
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;
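
          /* Worked example (added comment, not in the original): for
             arg0 = 0x18 (binary 11000), arg0 & -arg0 isolates the lowest
             set bit 0x8, and exact_log2 (0x8) + 1 == 4, i.e. FFS numbers
             bits from 1.  For arg0 == 0 the AND is 0, exact_log2 returns
             -1, and the result is the desired 0.  */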
        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 == 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          break;

        case CTZ:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = GET_MODE_BITSIZE (mode);
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
            }
          else
            lv = exact_log2 (l1 & -l1);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;

      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
        {
        case FIX:          i = REAL_VALUE_FIX (d);          break;
        case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
        default:
          abort ();
        }
      return gen_int_mode (i, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }
  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
          && !MODE_HAS_INFINITIES (mode)
          && REAL_VALUES_EQUAL (f1, dconst0))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* .. fall through ...  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:   case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return gen_rtx_NEG (mode, XEXP (op0, 0));

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));
          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
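
          /* Worked example (added comment, not in the original): for
             (plus (mult r1 (const_int 3)) r1) both operands strip down to
             the same LHS with coeff0 = 3 and coeff1 = 1, so the result is
             r1 * 4; (plus (mult r1 (const_int 3)) (neg r1)) gives
             coefficients 3 and -1 and folds to r1 * 2.  Had the input
             contained no real MULT, the 0 return above suppresses the
             transformation.  */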
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;
          break;

        case COMPARE:
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return gen_rtx_NEG (mode, op1);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return gen_rtx_NOT (mode, op1);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;
          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;
        case MULT:
          if (trueop1 == constm1_rtx)
            {
              tem = simplify_unary_operation (NEG, mode, op0, mode);

              return tem ? tem : gen_rtx_NEG (mode, op0);
            }

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return gen_rtx_NEG (mode, op0);
            }
          break;
        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return gen_rtx_NOT (mode, op0);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;
        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

          /* ... fall through ...  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  return gen_rtx_MULT (mode, op0,
                                       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
                }
            }
          break;
        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

          /* ... fall through ...  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* ... fall through ...  */

        case ASHIFT:
        case ASHIFTRT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;
        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
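
      /* Worked example (added comment, not in the original): rotating the
         QImode value 0xB4 left by 3 computes (0xB4 << 3) | (0xB4 >> 5) on
         the zero-extended ARG0, giving 0x5A0 | 0x05 = 0x5A5;
         trunc_int_for_mode below masks this back to the 8-bit result
         0xA5.  */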
    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
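
/* Illustrative walk-through (added comment, not part of the original file):
   for (minus (plus a b) (plus a c)) the expansion loop flattens the input
   into the signed operand list {+a, +b, -a, -c}; the pairwise combination
   pass cancels +a against -a, and the rebuild step emits (minus b c).
   Constants are combined the same way, so ((x + 4) - (x + 1)) collapses
   to (const_int 3).  */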
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);
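
  /* Illustrative note (added comment, not in the original): the rewrite
     turns "A cmp B" into "(A - B) cmp 0".  That is only sound for signed
     codes: with A = 0 and B = 0x80000001 in SImode, A LTU B is true, yet
     A - B wraps to 0x7fffffff, which compares greater than zero, so an
     unsigned condition mapped through this fold would give the wrong
     answer.  Hence the GTU/GEU/LTU/LEU exclusion above.  */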
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result.  */
  if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             representable value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     appropriately.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    default:
      abort ();
    }
}
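
/* Illustrative usage sketch (added comment, not part of the original file):
   with two integer constants the caller gets a definite answer, e.g.

     simplify_relational_operation (LTU, SImode,
                                    GEN_INT (1), GEN_INT (2))

   returns const_true_rtx, while comparing a REG against itself in a
   floating-point mode that honors NaNs returns 0 because no
   simplification is safe there.  */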
2427 /* Simplify CODE, an operation with result mode MODE and three operands,
2428 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2429 a constant. Return 0 if no simplifications is possible. */
2432 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
2434 enum machine_mode mode
, op0_mode
;
2437 unsigned int width
= GET_MODE_BITSIZE (mode
);
2439 /* VOIDmode means "infinite" precision. */
2441 width
= HOST_BITS_PER_WIDE_INT
;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
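      /* Worked example (editorial, not from the original sources): with
	 BITS_BIG_ENDIAN clear, (zero_extract (const_int 0x1234) (const_int 4)
	 (const_int 8)) shifts 0x1234 right by 8 and masks to 4 bits, giving
	 (const_int 2); a sign_extract would additionally propagate the top
	 bit of the extracted field when it is set.  */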
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;
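      /* Editorial examples for the IF_THEN_ELSE handling above (not part of
	 the original code): (if_then_else (const_int 1) A B) folds to A, and
	 when the two arms are STORE_FLAG_VALUE and 0 the whole expression
	 collapses back into the comparison itself, reversed if necessary.  */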
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	return 0;
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
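/* Editorial example for the VEC_MERGE handling above (not part of the
   original code): a zero selection mask yields op1 and an all-ones mask
   yields op0; with two CONST_VECTOR operands and, say, mask 0b0101 on a
   four-element vector, elements 0 and 2 come from op0 and elements 1 and 3
   from op1.  */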
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
	{
	  elt = CONST_VECTOR_ELT (op, offset);

	  /* ?? We probably don't need this copy_rtx because constants
	     can be shared.  ?? */

	  return copy_rtx (elt);
	}
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
	       && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
	{
	  return (gen_rtx_CONST_VECTOR
		  (outermode,
		   gen_rtvec_v (GET_MODE_NUNITS (outermode),
				&CONST_VECTOR_ELT (op, offset))));
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (GET_MODE_SIZE (outermode) % elt_size == 0))
	{
	  /* This happens when the target register size is smaller than
	     the vector mode, and we synthesize operations with vectors
	     of elements that are smaller than the register size.  */
	  HOST_WIDE_INT sum = 0, high = 0;
	  unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
	  unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
	  unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
	  int shift = BITS_PER_UNIT * elt_size;

	  for (; n_elts--; i += step)
	    {
	      elt = CONST_VECTOR_ELT (op, i);
	      if (GET_CODE (elt) == CONST_DOUBLE
		  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
		{
		  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
					    elt);
		  if (! elt)
		    return NULL_RTX;
		}
	      if (GET_CODE (elt) != CONST_INT)
		return NULL_RTX;
	      /* Avoid overflow.  */
	      if (high >> (HOST_BITS_PER_WIDE_INT - shift))
		return NULL_RTX;
	      high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
	      sum = (sum << shift) + INTVAL (elt);
	    }
	  if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
	    return GEN_INT (trunc_int_for_mode (sum, outermode));
	  else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
	    return immed_double_const (sum, high, outermode);
	  else
	    return NULL_RTX;
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (elt_size % GET_MODE_SIZE (outermode) == 0))
	{
	  enum machine_mode new_mode
	    = int_mode_for_mode (GET_MODE_INNER (innermode));
	  int subbyte = byte % elt_size;

	  op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
	  if (! op)
	    return NULL_RTX;
	  return simplify_subreg (outermode, op, new_mode, subbyte);
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
	/* This shouldn't happen, but let's not do anything stupid.  */
	return NULL_RTX;
    }
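  /* Editorial example for the vector-constant handling above (not part of
     the original code): for a V4QImode CONST_VECTOR, (subreg:QI ... 2)
     simply selects element 2, while (subreg:HI ... 0) is synthesized by the
     accumulation loop from two adjacent QImode elements, in an order that
     depends on BYTES_BIG_ENDIAN.  */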
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
	  || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
	{
	  /* Construct a CONST_VECTOR from individual subregs.  */
	  enum machine_mode submode = GET_MODE_INNER (outermode);
	  int subsize = GET_MODE_UNIT_SIZE (outermode);
	  int i, elts = GET_MODE_NUNITS (outermode);
	  rtvec v = rtvec_alloc (elts);
	  rtx elt;

	  for (i = 0; i < elts; i++, byte += subsize)
	    {
	      /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
	      /* ??? It would be nice if we could actually make such subregs
		 on targets that allow such relocations.  */
	      if (byte >= GET_MODE_UNIT_SIZE (innermode))
		elt = CONST0_RTX (submode);
	      else
		elt = simplify_subreg (submode, op, innermode, byte);
	      if (! elt)
		return NULL_RTX;
	      RTVEC_ELT (v, i) = elt;
	    }

	  return gen_rtx_CONST_VECTOR (outermode, v);
	}
      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
	  && GET_CODE (op) != CONST_VECTOR)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* Similar comments as above apply here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      if (GET_MODE_CLASS (outermode) != MODE_INT
	  && GET_MODE_CLASS (outermode) != MODE_CC)
	{
	  enum machine_mode new_mode = int_mode_for_mode (outermode);

	  if (new_mode != innermode || byte != 0)
	    {
	      op = simplify_subreg (new_mode, op, innermode, byte);
	      if (! op)
		return NULL_RTX;
	      return simplify_subreg (outermode, op, new_mode, 0);
	    }
	}
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = ! part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* Fall through.  */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }

	default:
	  break;
	}
    }
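  /* Editorial example for the constant handling above (not part of the
     original code): on a little-endian 32-bit target,
     (subreg:HI (const_int 0x12345678) 0) folds to (const_int 0x5678), while
     byte offset 2 yields (const_int 0x1234).  */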
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0; on big-endian machines this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
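  /* Editorial example for the nested-SUBREG handling above (not part of the
     original code): (subreg:QI (subreg:HI (reg:SI 100) 0) 0), with reg 100 a
     hypothetical pseudo, collapses to (subreg:QI (reg:SI 100) 0) once the
     recursive call finds nothing further to fold.  */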
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	     ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
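  /* Editorial note (not part of the original code): on a 32-bit target this
     lets a subreg of a DImode hard-register pair be rewritten as a direct
     reference to one of the constituent SImode hard registers, provided
     HARD_REGNO_MODE_OK accepts the resulting register/mode combination.  */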
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
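/* Editorial example for the CONCAT handling above (not part of the original
   code): for a complex value (concat:DC (reg:DF 100) (reg:DF 101)), with
   hypothetical pseudos 100 and 101, a byte offset within the first DFmode
   half refers to the real part and an offset in the second half refers to
   the imaginary part.  */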
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
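/* Editorial usage sketch (not part of the original code): a caller wanting
   the SImode word at byte offset 0 of a DImode value X can write
   simplify_gen_subreg (SImode, X, DImode, 0) and will get either a folded
   expression or a plain (subreg:SI X 0) when nothing simplifies.  */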
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow pass-dependent state to be provided to these routines
	   and add simplifications based on the pass-dependent state.
	   Remove code from cse.c & combine.c that becomes redundant.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}
      /* Fall through.  */
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));

    case 'x':
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))