/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
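
/* For example, with 64-bit HOST_WIDE_INTs the double-word value -2 is
   held as the pair (0xfffffffffffffffe, -1): the high word is
   HWI_SIGN_EXTEND (low), i.e. all ones, while the pair for 2 is (2, 0).  */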
static rtx neg_const_int (enum machine_mode, rtx);
static bool mode_signbit_p (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
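
/* The truncation matters: negating the most negative SImode value,
   (const_int -2147483648), overflows, and gen_int_mode wraps the result
   back into SImode so a valid constant is still returned.  */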
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
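
/* Example: mode_signbit_p (QImode, GEN_INT (0x80)) is true, since 0x80
   is the most significant bit of an 8-bit mode; any other QImode
   constant yields false.  */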
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
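
/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   returns X itself rather than building (plus:SI x (const_int 0)).  */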
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
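
/* Typical effect: a (mem/u (symbol_ref ...)) whose constant pool entry
   holds an SFmode 1.0 is replaced by the corresponding CONST_DOUBLE, so
   later folding can see the actual value.  */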
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
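
/* Example: replacing (reg:SI 60) with (const_int 4) in
   (plus:SI (reg:SI 60) (const_int -4)) rebuilds the PLUS through
   simplify_gen_binary, which folds it to (const_int 0).  */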
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          return 0;
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1), mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1), mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
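
/* Example: given (plus (plus x (const_int 3)) (const_int 4)), the
   "(a op b) op c" -> "a op (b op c)" attempt above folds the two
   constants, producing (plus x (const_int 7)).  */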

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0, trueop1;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == XOR
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
                return xop00;

              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
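
          /* E.g. x * 8 becomes x << 3 here, since exact_log2 (8) == 3.  */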

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Canonicalize XOR of the most significant bit to PLUS.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (PLUS, mode, op0, op1);
          /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == PLUS
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, XEXP (op0, 1)))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            {
              /* Handle narrowing UDIV.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              return op0;
            }
          /* Convert divide by power of two into shift.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
          break;

        case DIV:
          /* Handle floating point and integers separately.  */
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              /* Maybe change 0.0 / x to 0.0.  This transformation isn't
                 safe for modes with NaNs, since 0.0 / 0.0 will then be
                 NaN rather than 0.0.  Nor is it safe for modes with signed
                 zeros, since dividing 0 by a negative number gives -0.0  */
              if (trueop0 == CONST0_RTX (mode)
                  && !HONOR_NANS (mode)
                  && !HONOR_SIGNED_ZEROS (mode)
                  && ! side_effects_p (op1))
                return op0;
              /* x/1.0 is x.  */
              if (trueop1 == CONST1_RTX (mode)
                  && !HONOR_SNANS (mode))
                return op0;

              if (GET_CODE (trueop1) == CONST_DOUBLE
                  && trueop1 != CONST0_RTX (mode))
                {
                  REAL_VALUE_TYPE d;
                  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

                  /* x/-1.0 is -x.  */
                  if (REAL_VALUES_EQUAL (d, dconstm1)
                      && !HONOR_SNANS (mode))
                    return simplify_gen_unary (NEG, mode, op0, mode);

                  /* Change FP division by a constant into multiplication.
                     Only do this with -funsafe-math-optimizations.  */
                  if (flag_unsafe_math_optimizations
                      && !REAL_VALUES_EQUAL (d, dconst0))
                    {
                      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                      return simplify_gen_binary (MULT, mode, op0, tem);
                    }
                }
            }
          else
            {
              /* 0/x is 0 (or x&0 if x has side-effects).  */
              if (trueop0 == const0_rtx)
                return side_effects_p (op1)
                       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                       : const0_rtx;
              /* x/1 is x.  */
              if (trueop1 == const1_rtx)
                {
                  /* Handle narrowing DIV.  */
                  rtx x = gen_lowpart_common (mode, op0);
                  if (x)
                    return x;
                  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                    return gen_lowpart_SUBREG (mode, op0);
                  return op0;
                }
              /* x/-1 is -x.  */
              if (trueop1 == constm1_rtx)
                {
                  rtx x = gen_lowpart_common (mode, op0);
                  if (!x)
                    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                        ? gen_lowpart_SUBREG (mode, op0) : op0;
                  return simplify_gen_unary (NEG, mode, x, mode);
                }
            }
          break;

        case MOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          /* Implement modulus by power of two as AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
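
          /* E.g. x % 8 becomes x & 7 here.  */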
          break;

        case UMOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          break;
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
2205 /* Compute the value of the arithmetic. */
2210 val
= arg0s
+ arg1s
;
2214 val
= arg0s
- arg1s
;
2218 val
= arg0s
* arg1s
;
2223 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2226 val
= arg0s
/ arg1s
;
2231 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2234 val
= arg0s
% arg1s
;
2239 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2242 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
2247 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2250 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
2266 /* If shift count is undefined, don't fold it; let the machine do
2267 what it wants. But truncate it if the machine will do that. */
2271 if (SHIFT_COUNT_TRUNCATED
)
2274 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
2281 if (SHIFT_COUNT_TRUNCATED
)
2284 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
2291 if (SHIFT_COUNT_TRUNCATED
)
2294 val
= arg0s
>> arg1
;
2296 /* Bootstrap compiler may not have sign extended the right shift.
2297 Manually extend the sign to insure bootstrap cc matches gcc. */
2298 if (arg0s
< 0 && arg1
> 0)
2299 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));

	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
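
	  /* Worked example (added note, not from the original sources): a
	     left rotate of the 8-bit value 0xb1 by 1 computes
	     (0xb1 << 1) | (0xb1 >> 7) == 0x162 | 0x01; after truncation to
	     the 8-bit mode this is 0x63, i.e. the top bit has wrapped
	     around to bit 0.  */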
	  /* Do nothing here.  */

	  val = arg0s <= arg1s ? arg0s : arg1s;

	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);

	  val = arg0s > arg1s ? arg0s : arg1s;

	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);

	  /* ??? There are simplifications that can be done.  */

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data

simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)

  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
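
/* Added note on the comparison above: returning d2's precedence minus d1's
   makes qsort place operands with higher commutative_operand_precedence
   first, so complex operands end up ahead of constants in the OPS array.  */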
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)

  struct simplify_plus_minus_op_data ops[8];

  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[1].neg = (code == MINUS);

  for (i = 0; i < n_ops; i++)

      rtx this_op = ops[i].op;
      int this_neg = ops[i].neg;
      enum rtx_code this_code = GET_CODE (this_op);

	  ops[n_ops].op = XEXP (this_op, 1);
	  ops[n_ops].neg = (this_code == MINUS) ^ this_neg;

	  ops[i].op = XEXP (this_op, 0);

	  ops[i].op = XEXP (this_op, 0);
	  ops[i].neg = ! this_neg;

	      && GET_CODE (XEXP (this_op, 0)) == PLUS
	      && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
	      && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))

	      ops[i].op = XEXP (XEXP (this_op, 0), 0);
	      ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
	      ops[n_ops].neg = this_neg;

	  /* ~a -> (-a - 1) */

	      ops[n_ops].op = constm1_rtx;
	      ops[n_ops++].neg = this_neg;
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = !this_neg;

	      ops[i].op = neg_const_int (mode, this_op);
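
/* Added summary of the expansion loop above: each OPS[] entry is an operand
   plus a negation flag.  A PLUS or MINUS operand is split into its two
   halves, a NEG just flips the flag, a NOT x becomes -x with an extra -1
   entry, a CONST wrapping (plus C1 C2) is split into the two constants, and
   a CONST_INT operand that carries a negation is folded with
   neg_const_int.  */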
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)

	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))

		enum rtx_code ncode = PLUS;

		    tem = lhs, lhs = rhs, rhs = tem;

		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */

		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */

			 && GET_CODE (tem) == NOT
			 && XEXP (tem, 0) == rhs))

		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[j].op = NULL_RTX;

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)

    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
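
/* Added note on the combination loop above: whenever a pair (lhs, rhs)
   folds, the folded value takes one of the two slots and ops[j].op is
   cleared to NULL_RTX; the packing step then squeezes the surviving
   entries down before they are re-sorted.  */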
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))

      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));

  /* Count the number of CONSTs that we generated.  */

  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */

      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)

      ops[0].op = gen_rtx_NEG (mode, ops[0].op);

  /* Now make the result by performing the requested operations.  */

  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)

  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);

#ifdef FLOAT_STORE_FLAG_VALUE
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)

	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
	  else if (GET_MODE_CLASS (mode) == MODE_FLOAT)

	      REAL_VALUE_TYPE val;
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)

  if (GET_CODE (op1) == CONST_INT)

      if (INTVAL (op1) == 0 && COMPARISON_P (op0))

	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */

	      if (GET_MODE (op0) == cmp_mode)
		return simplify_rtx (op0);

	      return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					      XEXP (op0, 0), XEXP (op0, 1));

	  else if (code == EQ)

	      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);

	      return simplify_gen_relational (new, mode, VOIDmode,
					      XEXP (op0, 0), XEXP (op0, 1));
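
/* Added example (assuming the usual NE/EQ handling at this point): when a
   comparison is itself compared against (const_int 0),
   (ne (ne x y) (const_int 0)) reduces to (ne x y), while
   (eq (ne x y) (const_int 0)) is rebuilt with the reversed code,
   giving (eq x y).  */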
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,

  int equal, op0lt, op0ltu, op1lt, op1ltu;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))

      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
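
/* Added example: with flag_wrapv clear, a comparison such as
   (lt (plus x 3) (plus x 5)) first folds the subtraction to (const_int -2)
   and then recurses as (lt -2 0), which is a tautology.  */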
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)

      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))

	    return const_true_rtx;

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))

      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)

	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);

	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);

      if (GET_CODE (trueop1) == CONST_DOUBLE)

	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);

	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)

	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);

      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
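
      /* Added note: each constant is held as a (high, low) pair of
	 HOST_WIDE_INTs, with HWI_SIGN_EXTEND supplying the missing high
	 word for a CONST_INT.  The flags above therefore compare the high
	 words first (signed or unsigned as appropriate) and fall back to
	 an unsigned comparison of the low words when the high words tie.  */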
  /* Otherwise, there are some code-specific tests we can make.  */

      /* Optimize comparisons with upper and lower bounds.  */
      if (INTEGRAL_MODE_P (mode)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)

	  get_mode_bounds (mode, sign, mode, &mmin, &mmax);

	      /* x >= min is always true.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const_true_rtx;

	      /* x <= max is always true.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const_true_rtx;

	      /* x > max is always false.  */
	      if (rtx_equal_p (trueop1, mmax))

	      /* x < min is always false.  */
	      if (rtx_equal_p (trueop1, mmin))

	  if (tem == const0_rtx
	      || tem == const_true_rtx)

	  if (trueop1 == const0_rtx && nonzero_address_p (op0))

	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;

	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))

	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)

	      if (GET_CODE (tem) == ABS)

	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))

	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)

	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;

	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))

	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)

	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     appropriately.  */

      return equal ? const_true_rtx : const0_rtx;

      return ! equal ? const_true_rtx : const0_rtx;

      return op0lt ? const_true_rtx : const0_rtx;

      return op1lt ? const_true_rtx : const0_rtx;

      return op0ltu ? const_true_rtx : const0_rtx;

      return op1ltu ? const_true_rtx : const0_rtx;

      return equal || op0lt ? const_true_rtx : const0_rtx;

      return equal || op1lt ? const_true_rtx : const0_rtx;

      return equal || op0ltu ? const_true_rtx : const0_rtx;

      return equal || op1ltu ? const_true_rtx : const0_rtx;

      return const_true_rtx;
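
/* Added note: the returns above correspond, in order, to the EQ, NE, LT,
   GT, LTU, GTU, LE, GE, LEU and GEU cases of the enclosing switch; each one
   simply maps the precomputed EQUAL/OP0LT/OP0LTU/OP1LT/OP1LTU flags to
   const_true_rtx or const0_rtx.  */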
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,

  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
    width = HOST_BITS_PER_WIDE_INT;

      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)

	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));

	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))

	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
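
	  /* Added example: for (zero_extract (const_int 0xb4) (const_int 4)
	     (const_int 2)) on a !BITS_BIG_ENDIAN target, VAL starts as 0xb4,
	     is shifted right by 2 to 0x2d, and is then masked with
	     (1 << 4) - 1, yielding 0xd.  A sign_extract of the same field
	     would notice bit 3 being set and return -3 instead.  */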
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))

      if (COMPARISON_P (op0) && ! side_effects_p (op0))

	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)

	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)

		  tmp = reversed_comparison_code (op0, NULL_RTX);

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),

	  /* See if any simplifications were possible.  */

	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;

	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))

      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)

	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))

	  if ((INTVAL (op2) & mask) == mask)

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)

	      rtvec v = rtvec_alloc (n_elts);

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
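
	  /* Added note on the vec_merge folding above: bit I of the OP2 mask
	     selects element I of OP0 when set and element I of OP1 when
	     clear, so an all-zero mask reduces to OP1 and an all-ones mask
	     to OP0 before any per-element work is done.  */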
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)

  /* We support up to 512-bit values (for V8DFmode).  */

	 value_mask = (1 << value_bit) - 1

  unsigned char value[max_bitsize / value_bit];

  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)

      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));

      elem_bitsize = max_bitsize;

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */

  for (elem = 0; elem < num_elem; elem++)

      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */

	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)

	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;

      switch (GET_CODE (el))

	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;

	  if (GET_MODE (el) == VOIDmode)

	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)

		  = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < max_bitsize; i += value_bit)

	  else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)

	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      if (bitsize > elem_bitsize)

	      if (bitsize % value_bit != 0)

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)

		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;

		  *vp++ = tmp[ibase / 32] >> i % 32;

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))

      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)

      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))

      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);

      outer_submode = outermode;

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  if (elem_bitsize % value_bit != 0)

  if (elem_bitsize + value_start * value_bit > max_bitsize)

  for (elem = 0; elem < num_elem; elem++)

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */

	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)

	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;

      switch (outer_class)

	case MODE_PARTIAL_INT:

	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);

	      elems[elem] = immed_double_const (lo, hi, outer_submode);

	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)

	    for (i = 0; i < elem_bitsize; i += value_bit)

		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;

		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))

  if (outermode == innermode && !byte)

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)

      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))

	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))

	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))

	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)

	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))

	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),

      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
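
/* Added example of the nested-SUBREG case above:
   (subreg:QI (subreg:HI (reg:SI x) 2) 1) combines the two byte offsets
   into a single offset and, when the recursive call cannot simplify
   further, is rebuilt as (subreg:QI (reg:SI x) 3) (offsets shown for a
   little-endian target).  */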
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)

      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM

#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM

      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,

      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))

	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis, which cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)

      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);

      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);

  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))

      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */

	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,

	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
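
/* Added examples of the truncation rules above (little-endian offsets):
   (subreg:QI (zero_extend:SI (reg:QI r)) 0) is just (reg:QI r);
   (subreg:HI (zero_extend:SI (reg:QI r)) 0) becomes
   (zero_extend:HI (reg:QI r)); and
   (subreg:HI (zero_extend:SI (reg:QI r)) 2) selects only bits above the
   source value, so it folds to (const_int 0).  */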
/* Make a SUBREG operation or equivalent if it folds.  */

simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))

  if (GET_CODE (op) == QUEUED)

  new = simplify_subreg (outermode, op, innermode, byte);

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)

  return gen_rtx_SUBREG (outermode, op, byte);
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
simplify_rtx (rtx x)

  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))

      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),

    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),

      return simplify_gen_subreg (mode, SUBREG_REG (x),
				  GET_MODE (SUBREG_REG (x)),

      /* Convert (lo_sum (high FOO) FOO) to FOO.  */
      if (GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))