/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
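/* For example, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is -1 and
   HWI_SIGN_EXTEND (7) is 0, i.e. the value the high word of a
   sign-extended (low, high) pair would hold.  */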
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
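/* As an illustration of the truncation: in QImode, negating
   (const_int -128) overflows to 128, which gen_int_mode wraps back
   to (const_int -128).  */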
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
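/* Example: on a host with a 64-bit HOST_WIDE_INT, the SImode sign bit
   is (const_int -2147483648); its low 32 bits are 0x80000000, so the
   mask-and-compare above reports true.  */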
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
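/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X, while an unfoldable commutative case such as
   (plus (const_int 4) (reg)) is reordered to put the constant
   second before the PLUS rtx is built.  */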
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
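/* Illustration: for a MEM whose address is a SYMBOL_REF into the
   constant pool, avoid_constant_pool_reference returns the pooled
   constant itself rather than X, so the inequality above detects
   pool references.  */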
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
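/* Example: replacing (reg R) with (const_int 4) inside
   (plus:SI (reg R) (const_int 3)) recurses into the PLUS, substitutes
   the register, and lets simplify_gen_binary fold the result to
   (const_int 7).  */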
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
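/* For instance, simplify_unary_operation (NEG, SImode, GEN_INT (5),
   SImode) folds to (const_int -5) via the constant folder, while NEG
   of a non-constant falls through to the pattern-based rules below.  */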
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:SI (subreg:DI (truncate:SI X) 0)) is
	 (truncate:SI X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return SUBREG_REG (op);

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION
	 is nonzero for the corresponding modes.  But don't do this
	 for an (LSHIFTRT (MULT ...)) since this will cause problems
	 with the umulXi3_highpart patterns.  */
      if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				 GET_MODE_BITSIZE (GET_MODE (op)))
	  && num_sign_bit_copies (op, GET_MODE (op))
	     >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1)
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), GET_MODE (XEXP (op, 0)));

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /*  (float_extend (float_extend x)) is (float_extend x)

	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (code, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
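/* Worked example: for "(x + 3) + y" the canonicalization rule above
   rewrites the expression as "(x + y) + 3", moving the constant
   outermost so later folds can combine it with other constants.  */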
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1l = -1;
	      coeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1l = INTVAL (XEXP (rhs, 1));
	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      coeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.

	 In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && trueop1 == CONST0_RTX (mode))
	return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1l = 1;
	      negcoeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
	      negcoeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (GET_CODE (op1) == CONST_INT
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
	       && (GET_MODE (trueop1) == VOIDmode
		   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	       && GET_MODE (op0) == mode
	       && CONST_DOUBLE_LOW (trueop1) == 0
	       && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
	return op1;

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && GET_CODE (XEXP (opleft, 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (INTVAL (XEXP (op0, 1))
						    & ~INTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
2026 if (trueop1
== const0_rtx
)
2028 if (GET_CODE (trueop1
) == CONST_INT
2029 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2030 == GET_MODE_MASK (mode
)))
2031 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2032 if (rtx_equal_p (trueop0
, trueop1
)
2033 && ! side_effects_p (op0
)
2034 && GET_MODE_CLASS (mode
) != MODE_CC
)
2035 return CONST0_RTX (mode
);
2037 /* Canonicalize XOR of the most significant bit to PLUS. */
2038 if ((GET_CODE (op1
) == CONST_INT
2039 || GET_CODE (op1
) == CONST_DOUBLE
)
2040 && mode_signbit_p (mode
, op1
))
2041 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2042 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2043 if ((GET_CODE (op1
) == CONST_INT
2044 || GET_CODE (op1
) == CONST_DOUBLE
)
2045 && GET_CODE (op0
) == PLUS
2046 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
2047 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
2048 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2049 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2050 simplify_gen_binary (XOR
, mode
, op1
,
2053 /* If we are XORing two things that have no bits in common,
2054 convert them into an IOR. This helps to detect rotation encoded
2055 using those methods and possibly other simplifications. */
2057 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2058 && (nonzero_bits (op0
, mode
)
2059 & nonzero_bits (op1
, mode
)) == 0)
2060 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */
      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
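
      /* To see why (xor (and A B) B) is (and (not A) B), consider each
         bit: where B is 0 both sides are 0, and where B is 1 the left
         side is A ^ 1 = ~A, which is exactly what (and (not A) B)
         gives.  */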
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;

      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
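
      /* For example, (and:SI (zero_extend:SI X:QI) (const_int 60)) becomes
         (zero_extend:SI (and:QI X (const_int 60))): since the constant has
         no bits set outside QImode, the result has no bits set outside
         QImode either, so a zero extension is correct even when the
         original extension was a sign extension.  */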
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
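
      /* The last transformation is simple absorption: every bit of A that
         survives the AND is already set in (A | B), so e.g.
         (and:SI (ior:SI X Y) X) is just X.  */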
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;

                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;

                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
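
      /* For example, with M == 0xff and N == 0xffff,
         ((A & 0xffff) + B) & 0xff equals (A + B) & 0xff: the AND only
         clears bits of A above bit 7, and those bits can never carry
         downward into the low eight bits of the sum.  */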
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
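
      /* For example, (udiv:SI X (const_int 8)) becomes
         (lshiftrt:SI X (const_int 3)).  This is only valid for unsigned
         division; signed division by a power of two needs rounding
         adjustments and is not handled here.  */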
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
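                  /* For example, with -funsafe-math-optimizations
                     (div:DF X (const_double 4.0)) becomes
                     (mult:DF X (const_double 0.25)).  The transformation
                     is restricted to that flag because the reciprocal is
                     in general not exactly representable, so the product
                     may differ from the true quotient in the last bits.  */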
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
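
      /* For example, (umod:SI X (const_int 8)) becomes
         (and:SI X (const_int 7)).  As with the division case above, this
         only holds for the unsigned operation.  */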
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Fall through....  */

    case ASHIFT:
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      break;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
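
      /* The MIN/MAX cases above all exploit the same idea: comparing
         against the extreme value of the mode makes the result known
         without looking at the other operand.  For instance,
         (umin:SI X (const_int 0)) is 0 and (umax:SI X (const_int -1)) is
         the all-ones constant, provided X has no side effects.  */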
2449 /* ??? There are simplifications that can be done. */
2453 if (!VECTOR_MODE_P (mode
))
2455 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2456 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2457 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2458 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2459 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2461 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2462 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2467 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2468 gcc_assert (GET_MODE_INNER (mode
)
2469 == GET_MODE_INNER (GET_MODE (trueop0
)));
2470 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2472 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2474 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2475 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2476 rtvec v
= rtvec_alloc (n_elts
);
2479 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2480 for (i
= 0; i
< n_elts
; i
++)
2482 rtx x
= XVECEXP (trueop1
, 0, i
);
2484 gcc_assert (GET_CODE (x
) == CONST_INT
);
2485 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2489 return gen_rtx_CONST_VECTOR (mode
, v
);
2493 if (XVECLEN (trueop1
, 0) == 1
2494 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2495 && GET_CODE (trueop0
) == VEC_CONCAT
)
2498 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2500 /* Try to find the element in the VEC_CONCAT. */
2501 while (GET_MODE (vec
) != mode
2502 && GET_CODE (vec
) == VEC_CONCAT
)
2504 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2505 if (offset
< vec_size
)
2506 vec
= XEXP (vec
, 0);
2510 vec
= XEXP (vec
, 1);
2512 vec
= avoid_constant_pool_reference (vec
);
2515 if (GET_MODE (vec
) == mode
)
2522 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2523 ? GET_MODE (trueop0
)
2524 : GET_MODE_INNER (mode
));
2525 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2526 ? GET_MODE (trueop1
)
2527 : GET_MODE_INNER (mode
));
2529 gcc_assert (VECTOR_MODE_P (mode
));
2530 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2531 == GET_MODE_SIZE (mode
));
2533 if (VECTOR_MODE_P (op0_mode
))
2534 gcc_assert (GET_MODE_INNER (mode
)
2535 == GET_MODE_INNER (op0_mode
));
2537 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2539 if (VECTOR_MODE_P (op1_mode
))
2540 gcc_assert (GET_MODE_INNER (mode
)
2541 == GET_MODE_INNER (op1_mode
));
2543 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2545 if ((GET_CODE (trueop0
) == CONST_VECTOR
2546 || GET_CODE (trueop0
) == CONST_INT
2547 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2548 && (GET_CODE (trueop1
) == CONST_VECTOR
2549 || GET_CODE (trueop1
) == CONST_INT
2550 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2552 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2553 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2554 rtvec v
= rtvec_alloc (n_elts
);
2556 unsigned in_n_elts
= 1;
2558 if (VECTOR_MODE_P (op0_mode
))
2559 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2560 for (i
= 0; i
< n_elts
; i
++)
2564 if (!VECTOR_MODE_P (op0_mode
))
2565 RTVEC_ELT (v
, i
) = trueop0
;
2567 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2571 if (!VECTOR_MODE_P (op1_mode
))
2572 RTVEC_ELT (v
, i
) = trueop1
;
2574 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2579 return gen_rtx_CONST_VECTOR (mode
, v
);
2592 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2595 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2597 unsigned int width
= GET_MODE_BITSIZE (mode
);
2599 if (VECTOR_MODE_P (mode
)
2600 && code
!= VEC_CONCAT
2601 && GET_CODE (op0
) == CONST_VECTOR
2602 && GET_CODE (op1
) == CONST_VECTOR
)
2604 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2605 enum machine_mode op0mode
= GET_MODE (op0
);
2606 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2607 enum machine_mode op1mode
= GET_MODE (op1
);
2608 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2609 rtvec v
= rtvec_alloc (n_elts
);
2612 gcc_assert (op0_n_elts
== n_elts
);
2613 gcc_assert (op1_n_elts
== n_elts
);
2614 for (i
= 0; i
< n_elts
; i
++)
2616 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2617 CONST_VECTOR_ELT (op0
, i
),
2618 CONST_VECTOR_ELT (op1
, i
));
2621 RTVEC_ELT (v
, i
) = x
;
2624 return gen_rtx_CONST_VECTOR (mode
, v
);
2627 if (VECTOR_MODE_P (mode
)
2628 && code
== VEC_CONCAT
2629 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2631 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2632 rtvec v
= rtvec_alloc (n_elts
);
2634 gcc_assert (n_elts
>= 2);
2637 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2638 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2640 RTVEC_ELT (v
, 0) = op0
;
2641 RTVEC_ELT (v
, 1) = op1
;
2645 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2646 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2649 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2650 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2651 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2653 for (i
= 0; i
< op0_n_elts
; ++i
)
2654 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2655 for (i
= 0; i
< op1_n_elts
; ++i
)
2656 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2659 return gen_rtx_CONST_VECTOR (mode
, v
);
2662 if (SCALAR_FLOAT_MODE_P (mode
)
2663 && GET_CODE (op0
) == CONST_DOUBLE
2664 && GET_CODE (op1
) == CONST_DOUBLE
2665 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2676 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2678 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2680 for (i
= 0; i
< 4; i
++)
2697 real_from_target (&r
, tmp0
, mode
);
2698 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2702 REAL_VALUE_TYPE f0
, f1
, value
, result
;
2705 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
2706 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
2707 real_convert (&f0
, mode
, &f0
);
2708 real_convert (&f1
, mode
, &f1
);
2710 if (HONOR_SNANS (mode
)
2711 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
2715 && REAL_VALUES_EQUAL (f1
, dconst0
)
2716 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
2719 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2720 && flag_trapping_math
2721 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
2723 int s0
= REAL_VALUE_NEGATIVE (f0
);
2724 int s1
= REAL_VALUE_NEGATIVE (f1
);
2729 /* Inf + -Inf = NaN plus exception. */
2734 /* Inf - Inf = NaN plus exception. */
2739 /* Inf / Inf = NaN plus exception. */
2746 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2747 && flag_trapping_math
2748 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
2749 || (REAL_VALUE_ISINF (f1
)
2750 && REAL_VALUES_EQUAL (f0
, dconst0
))))
2751 /* Inf * 0 = NaN plus exception. */
2754 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
2756 real_convert (&result
, mode
, &value
);
2758 /* Don't constant fold this floating point operation if
2759 the result has overflowed and flag_trapping_math. */
2761 if (flag_trapping_math
2762 && MODE_HAS_INFINITIES (mode
)
2763 && REAL_VALUE_ISINF (result
)
2764 && !REAL_VALUE_ISINF (f0
)
2765 && !REAL_VALUE_ISINF (f1
))
2766 /* Overflow plus exception. */
	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */
2774 if ((flag_rounding_math
2775 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
2776 && !flag_unsafe_math_optimizations
))
2777 && (inexact
|| !real_identical (&result
, &value
)))
2780 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
2784 /* We can fold some multi-word operations. */
2785 if (GET_MODE_CLASS (mode
) == MODE_INT
2786 && width
== HOST_BITS_PER_WIDE_INT
* 2
2787 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2788 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2790 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2791 HOST_WIDE_INT h1
, h2
, hv
, ht
;
2793 if (GET_CODE (op0
) == CONST_DOUBLE
)
2794 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
2796 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
2798 if (GET_CODE (op1
) == CONST_DOUBLE
)
2799 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
2801 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
2806 /* A - B == A + (-B). */
2807 neg_double (l2
, h2
, &lv
, &hv
);
2810 /* Fall through.... */
2813 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2817 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2821 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2822 &lv
, &hv
, <
, &ht
))
2827 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2828 <
, &ht
, &lv
, &hv
))
2833 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2834 &lv
, &hv
, <
, &ht
))
2839 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2840 <
, &ht
, &lv
, &hv
))
2845 lv
= l1
& l2
, hv
= h1
& h2
;
2849 lv
= l1
| l2
, hv
= h1
| h2
;
2853 lv
= l1
^ l2
, hv
= h1
^ h2
;
2859 && ((unsigned HOST_WIDE_INT
) l1
2860 < (unsigned HOST_WIDE_INT
) l2
)))
2869 && ((unsigned HOST_WIDE_INT
) l1
2870 > (unsigned HOST_WIDE_INT
) l2
)))
2877 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
2879 && ((unsigned HOST_WIDE_INT
) l1
2880 < (unsigned HOST_WIDE_INT
) l2
)))
2887 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
2889 && ((unsigned HOST_WIDE_INT
) l1
2890 > (unsigned HOST_WIDE_INT
) l2
)))
2896 case LSHIFTRT
: case ASHIFTRT
:
2898 case ROTATE
: case ROTATERT
:
2899 if (SHIFT_COUNT_TRUNCATED
)
2900 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
2902 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
2905 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
2906 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
2908 else if (code
== ASHIFT
)
2909 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
2910 else if (code
== ROTATE
)
2911 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2912 else /* code == ROTATERT */
2913 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2920 return immed_double_const (lv
, hv
, mode
);
2923 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
2924 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
2926 /* Get the integer argument values in two forms:
2927 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2929 arg0
= INTVAL (op0
);
2930 arg1
= INTVAL (op1
);
2932 if (width
< HOST_BITS_PER_WIDE_INT
)
2934 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2935 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2938 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2939 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2942 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2943 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2951 /* Compute the value of the arithmetic. */
2956 val
= arg0s
+ arg1s
;
2960 val
= arg0s
- arg1s
;
2964 val
= arg0s
* arg1s
;
2969 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2972 val
= arg0s
/ arg1s
;
2977 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2980 val
= arg0s
% arg1s
;
2985 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2988 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
2993 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2996 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3014 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3015 the value is in range. We can't return any old value for
3016 out-of-range arguments because either the middle-end (via
3017 shift_truncation_mask) or the back-end might be relying on
3018 target-specific knowledge. Nor can we rely on
3019 shift_truncation_mask, since the shift might not be part of an
3020 ashlM3, lshrM3 or ashrM3 instruction. */
3021 if (SHIFT_COUNT_TRUNCATED
)
3022 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3023 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3026 val
= (code
== ASHIFT
3027 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3028 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3030 /* Sign-extend the result for arithmetic right shifts. */
3031 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3032 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3040 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3041 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3049 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3050 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3054 /* Do nothing here. */
3058 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3062 val
= ((unsigned HOST_WIDE_INT
) arg0
3063 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3067 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3071 val
= ((unsigned HOST_WIDE_INT
) arg0
3072 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3079 /* ??? There are simplifications that can be done. */
3086 return gen_int_mode (val
, mode
);
3094 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3097 Rather than test for specific case, we do this by a brute-force method
3098 and do all possible simplifications until no more changes occur. Then
3099 we rebuild the operation. */
3101 struct simplify_plus_minus_op_data
3109 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
3111 const struct simplify_plus_minus_op_data
*d1
= p1
;
3112 const struct simplify_plus_minus_op_data
*d2
= p2
;
3115 result
= (commutative_operand_precedence (d2
->op
)
3116 - commutative_operand_precedence (d1
->op
));
3119 return d1
->ix
- d2
->ix
;
3123 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3126 struct simplify_plus_minus_op_data ops
[8];
3128 int n_ops
= 2, input_ops
= 2;
3129 int first
, changed
, canonicalized
= 0;
3132 memset (ops
, 0, sizeof ops
);
3134 /* Set up the two operands and then expand them until nothing has been
3135 changed. If we run out of room in our array, give up; this should
3136 almost never happen. */
3141 ops
[1].neg
= (code
== MINUS
);
3147 for (i
= 0; i
< n_ops
; i
++)
3149 rtx this_op
= ops
[i
].op
;
3150 int this_neg
= ops
[i
].neg
;
3151 enum rtx_code this_code
= GET_CODE (this_op
);
3160 ops
[n_ops
].op
= XEXP (this_op
, 1);
3161 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3164 ops
[i
].op
= XEXP (this_op
, 0);
3167 canonicalized
|= this_neg
;
3171 ops
[i
].op
= XEXP (this_op
, 0);
3172 ops
[i
].neg
= ! this_neg
;
3179 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3180 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3181 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3183 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3184 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3185 ops
[n_ops
].neg
= this_neg
;
3193 /* ~a -> (-a - 1) */
3196 ops
[n_ops
].op
= constm1_rtx
;
3197 ops
[n_ops
++].neg
= this_neg
;
3198 ops
[i
].op
= XEXP (this_op
, 0);
3199 ops
[i
].neg
= !this_neg
;
3208 ops
[i
].op
= neg_const_int (mode
, this_op
);
3222 gcc_assert (n_ops
>= 2);
3225 int n_constants
= 0;
3227 for (i
= 0; i
< n_ops
; i
++)
3228 if (GET_CODE (ops
[i
].op
) == CONST_INT
)
3231 if (n_constants
<= 1)
3235 /* If we only have two operands, we can avoid the loops. */
3238 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3241 /* Get the two operands. Be careful with the order, especially for
3242 the cases where code == MINUS. */
3243 if (ops
[0].neg
&& ops
[1].neg
)
3245 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3248 else if (ops
[0].neg
)
3259 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3262 /* Now simplify each pair of operands until nothing changes. The first
3263 time through just simplify constants against each other. */
3270 for (i
= 0; i
< n_ops
- 1; i
++)
3271 for (j
= i
+ 1; j
< n_ops
; j
++)
3273 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
3274 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
3276 if (lhs
!= 0 && rhs
!= 0
3277 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
3279 enum rtx_code ncode
= PLUS
;
3285 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3287 else if (swap_commutative_operands_p (lhs
, rhs
))
3288 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3290 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3292 /* Reject "simplifications" that just wrap the two
3293 arguments in a CONST. Failure to do so can result
3294 in infinite recursion with simplify_binary_operation
3295 when it calls us to simplify CONST operations. */
3297 && ! (GET_CODE (tem
) == CONST
3298 && GET_CODE (XEXP (tem
, 0)) == ncode
3299 && XEXP (XEXP (tem
, 0), 0) == lhs
3300 && XEXP (XEXP (tem
, 0), 1) == rhs
)
3301 /* Don't allow -x + -1 -> ~x simplifications in the
3302 first pass. This allows us the chance to combine
3303 the -1 with other constants. */
3305 && GET_CODE (tem
) == NOT
3306 && XEXP (tem
, 0) == rhs
))
3309 if (GET_CODE (tem
) == NEG
)
3310 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3311 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3312 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3316 ops
[j
].op
= NULL_RTX
;
3326 /* Pack all the operands to the lower-numbered entries. */
3327 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3331 /* Stabilize sort. */
3337 /* Sort the operations based on swap_commutative_operands_p. */
3338 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
3340 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3342 && GET_CODE (ops
[1].op
) == CONST_INT
3343 && CONSTANT_P (ops
[0].op
)
3345 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3347 /* We suppressed creation of trivial CONST expressions in the
3348 combination loop to avoid recursion. Create one manually now.
3349 The combination loop should have ensured that there is exactly
3350 one CONST_INT, and the sort will have ensured that it is last
3351 in the array and that any other constant will be next-to-last. */
3354 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3355 && CONSTANT_P (ops
[n_ops
- 2].op
))
3357 rtx value
= ops
[n_ops
- 1].op
;
3358 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3359 value
= neg_const_int (mode
, value
);
3360 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3364 /* Put a non-negated operand first, if possible. */
3366 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3369 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3378 /* Now make the result by performing the requested operations. */
3380 for (i
= 1; i
< n_ops
; i
++)
3381 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3382 mode
, result
, ops
[i
].op
);
3387 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3389 plus_minus_operand_p (rtx x
)
3391 return GET_CODE (x
) == PLUS
3392 || GET_CODE (x
) == MINUS
3393 || (GET_CODE (x
) == CONST
3394 && GET_CODE (XEXP (x
, 0)) == PLUS
3395 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
3396 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
3399 /* Like simplify_binary_operation except used for relational operators.
3400 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3401 not also be VOIDmode.
   CMP_MODE specifies the mode in which the comparison is done, so it is
3404 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3405 the operands or, if both are VOIDmode, the operands are compared in
3406 "infinite precision". */
3408 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3409 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3411 rtx tem
, trueop0
, trueop1
;
3413 if (cmp_mode
== VOIDmode
)
3414 cmp_mode
= GET_MODE (op0
);
3415 if (cmp_mode
== VOIDmode
)
3416 cmp_mode
= GET_MODE (op1
);
3418 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3421 if (SCALAR_FLOAT_MODE_P (mode
))
3423 if (tem
== const0_rtx
)
3424 return CONST0_RTX (mode
);
3425 #ifdef FLOAT_STORE_FLAG_VALUE
3427 REAL_VALUE_TYPE val
;
3428 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3429 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3435 if (VECTOR_MODE_P (mode
))
3437 if (tem
== const0_rtx
)
3438 return CONST0_RTX (mode
);
3439 #ifdef VECTOR_STORE_FLAG_VALUE
3444 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3445 if (val
== NULL_RTX
)
3447 if (val
== const1_rtx
)
3448 return CONST1_RTX (mode
);
3450 units
= GET_MODE_NUNITS (mode
);
3451 v
= rtvec_alloc (units
);
3452 for (i
= 0; i
< units
; i
++)
3453 RTVEC_ELT (v
, i
) = val
;
3454 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3464 /* For the following tests, ensure const0_rtx is op1. */
3465 if (swap_commutative_operands_p (op0
, op1
)
3466 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3467 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3469 /* If op0 is a compare, extract the comparison arguments from it. */
3470 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3471 return simplify_relational_operation (code
, mode
, VOIDmode
,
3472 XEXP (op0
, 0), XEXP (op0
, 1));
3474 if (mode
== VOIDmode
3475 || GET_MODE_CLASS (cmp_mode
) == MODE_CC
3479 trueop0
= avoid_constant_pool_reference (op0
);
3480 trueop1
= avoid_constant_pool_reference (op1
);
3481 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3485 /* This part of simplify_relational_operation is only used when CMP_MODE
3486 is not in class MODE_CC (i.e. it is a real comparison).
   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
3492 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3493 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3495 enum rtx_code op0code
= GET_CODE (op0
);
3497 if (GET_CODE (op1
) == CONST_INT
)
3499 if (INTVAL (op1
) == 0 && COMPARISON_P (op0
))
      /* If op0 is a comparison, extract the comparison arguments from it.  */
3504 if (GET_MODE (op0
) == mode
)
3505 return simplify_rtx (op0
);
3507 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3508 XEXP (op0
, 0), XEXP (op0
, 1));
3510 else if (code
== EQ
)
3512 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3513 if (new_code
!= UNKNOWN
)
3514 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3515 XEXP (op0
, 0), XEXP (op0
, 1));
3520 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3521 if ((code
== EQ
|| code
== NE
)
3522 && (op0code
== PLUS
|| op0code
== MINUS
)
3524 && CONSTANT_P (XEXP (op0
, 1))
3525 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3527 rtx x
= XEXP (op0
, 0);
3528 rtx c
= XEXP (op0
, 1);
3530 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3532 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3535 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3536 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3538 && op1
== const0_rtx
3539 && GET_MODE_CLASS (mode
) == MODE_INT
3540 && cmp_mode
!= VOIDmode
3541 /* ??? Work-around BImode bugs in the ia64 backend. */
3543 && cmp_mode
!= BImode
3544 && nonzero_bits (op0
, cmp_mode
) == 1
3545 && STORE_FLAG_VALUE
== 1)
3546 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3547 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3548 : lowpart_subreg (mode
, op0
, cmp_mode
);
3553 /* Check if the given comparison (done in the given MODE) is actually a
3554 tautology or a contradiction.
3555 If no simplification is possible, this function returns zero.
3556 Otherwise, it returns either const_true_rtx or const0_rtx. */
3559 simplify_const_relational_operation (enum rtx_code code
,
3560 enum machine_mode mode
,
3563 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3568 gcc_assert (mode
!= VOIDmode
3569 || (GET_MODE (op0
) == VOIDmode
3570 && GET_MODE (op1
) == VOIDmode
));
3572 /* If op0 is a compare, extract the comparison arguments from it. */
3573 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3575 op1
= XEXP (op0
, 1);
3576 op0
= XEXP (op0
, 0);
3578 if (GET_MODE (op0
) != VOIDmode
)
3579 mode
= GET_MODE (op0
);
3580 else if (GET_MODE (op1
) != VOIDmode
)
3581 mode
= GET_MODE (op1
);
3586 /* We can't simplify MODE_CC values since we don't know what the
3587 actual comparison is. */
3588 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3591 /* Make sure the constant is second. */
3592 if (swap_commutative_operands_p (op0
, op1
))
3594 tem
= op0
, op0
= op1
, op1
= tem
;
3595 code
= swap_condition (code
);
3598 trueop0
= avoid_constant_pool_reference (op0
);
3599 trueop1
= avoid_constant_pool_reference (op1
);
3601 /* For integer comparisons of A and B maybe we can simplify A - B and can
3602 then simplify a comparison of that with zero. If A and B are both either
3603 a register or a CONST_INT, this can't help; testing for these cases will
3604 prevent infinite recursion here and speed things up.
3606 If CODE is an unsigned comparison, then we can never do this optimization,
3607 because it gives an incorrect result if the subtraction wraps around zero.
3608 ANSI C defines unsigned operations such that they never overflow, and
3609 thus such cases can not be ignored; but we cannot do it even for
3610 signed comparisons for languages such as Java, so test flag_wrapv. */
3612 if (!flag_wrapv
&& INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3613 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3614 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3615 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3616 /* We cannot do this for == or != if tem is a nonzero address. */
3617 && ((code
!= EQ
&& code
!= NE
) || ! nonzero_address_p (tem
))
3618 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
3619 return simplify_const_relational_operation (signed_condition (code
),
3620 mode
, tem
, const0_rtx
);
3622 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
3623 return const_true_rtx
;
3625 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
3628 /* For modes without NaNs, if the two operands are equal, we know the
3629 result except if they have side-effects. */
3630 if (! HONOR_NANS (GET_MODE (trueop0
))
3631 && rtx_equal_p (trueop0
, trueop1
)
3632 && ! side_effects_p (trueop0
))
3633 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3635 /* If the operands are floating-point constants, see if we can fold
3637 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3638 && GET_CODE (trueop1
) == CONST_DOUBLE
3639 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3641 REAL_VALUE_TYPE d0
, d1
;
3643 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3644 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3646 /* Comparisons are unordered iff at least one of the values is NaN. */
3647 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3657 return const_true_rtx
;
3670 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3671 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3672 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3675 /* Otherwise, see if the operands are both integers. */
3676 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3677 && (GET_CODE (trueop0
) == CONST_DOUBLE
3678 || GET_CODE (trueop0
) == CONST_INT
)
3679 && (GET_CODE (trueop1
) == CONST_DOUBLE
3680 || GET_CODE (trueop1
) == CONST_INT
))
3682 int width
= GET_MODE_BITSIZE (mode
);
3683 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3684 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3686 /* Get the two words comprising each integer constant. */
3687 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3689 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3690 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3694 l0u
= l0s
= INTVAL (trueop0
);
3695 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3698 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3700 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3701 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3705 l1u
= l1s
= INTVAL (trueop1
);
3706 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3709 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3710 we have to sign or zero-extend the values. */
3711 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
3713 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3714 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3716 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3717 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3719 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3720 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3722 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
3723 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
3725 equal
= (h0u
== h1u
&& l0u
== l1u
);
3726 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
3727 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
3728 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
3729 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
3732 /* Otherwise, there are some code-specific tests we can make. */
3735 /* Optimize comparisons with upper and lower bounds. */
3736 if (SCALAR_INT_MODE_P (mode
)
3737 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3750 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3757 /* x >= min is always true. */
3758 if (rtx_equal_p (trueop1
, mmin
))
3759 tem
= const_true_rtx
;
3765 /* x <= max is always true. */
3766 if (rtx_equal_p (trueop1
, mmax
))
3767 tem
= const_true_rtx
;
3772 /* x > max is always false. */
3773 if (rtx_equal_p (trueop1
, mmax
))
3779 /* x < min is always false. */
3780 if (rtx_equal_p (trueop1
, mmin
))
3787 if (tem
== const0_rtx
3788 || tem
== const_true_rtx
)
3795 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3800 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3801 return const_true_rtx
;
3805 /* Optimize abs(x) < 0.0. */
3806 if (trueop1
== CONST0_RTX (mode
)
3807 && !HONOR_SNANS (mode
)
3808 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3810 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3812 if (GET_CODE (tem
) == ABS
)
3818 /* Optimize abs(x) >= 0.0. */
3819 if (trueop1
== CONST0_RTX (mode
)
3820 && !HONOR_NANS (mode
)
3821 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3823 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3825 if (GET_CODE (tem
) == ABS
)
3826 return const_true_rtx
;
3831 /* Optimize ! (abs(x) < 0.0). */
3832 if (trueop1
== CONST0_RTX (mode
))
3834 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3836 if (GET_CODE (tem
) == ABS
)
3837 return const_true_rtx
;
3848 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3854 return equal
? const_true_rtx
: const0_rtx
;
3857 return ! equal
? const_true_rtx
: const0_rtx
;
3860 return op0lt
? const_true_rtx
: const0_rtx
;
3863 return op1lt
? const_true_rtx
: const0_rtx
;
3865 return op0ltu
? const_true_rtx
: const0_rtx
;
3867 return op1ltu
? const_true_rtx
: const0_rtx
;
3870 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
3873 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
3875 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
3877 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
3879 return const_true_rtx
;
3887 /* Simplify CODE, an operation with result mode MODE and three operands,
3888 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3889 a constant. Return 0 if no simplifications is possible. */
3892 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
3893 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
3896 unsigned int width
= GET_MODE_BITSIZE (mode
);
3898 /* VOIDmode means "infinite" precision. */
3900 width
= HOST_BITS_PER_WIDE_INT
;
3906 if (GET_CODE (op0
) == CONST_INT
3907 && GET_CODE (op1
) == CONST_INT
3908 && GET_CODE (op2
) == CONST_INT
3909 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
3910 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
3912 /* Extracting a bit-field from a constant */
3913 HOST_WIDE_INT val
= INTVAL (op0
);
3915 if (BITS_BIG_ENDIAN
)
3916 val
>>= (GET_MODE_BITSIZE (op0_mode
)
3917 - INTVAL (op2
) - INTVAL (op1
));
3919 val
>>= INTVAL (op2
);
3921 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
3923 /* First zero-extend. */
3924 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
3925 /* If desired, propagate sign bit. */
3926 if (code
== SIGN_EXTRACT
3927 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
3928 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
3931 /* Clear the bits that don't belong in our mode,
3932 unless they and our sign bit are all one.
3933 So we get either a reasonable negative value or a reasonable
3934 unsigned value for this mode. */
3935 if (width
< HOST_BITS_PER_WIDE_INT
3936 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
3937 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
3938 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3940 return gen_int_mode (val
, mode
);
3945 if (GET_CODE (op0
) == CONST_INT
)
3946 return op0
!= const0_rtx
? op1
: op2
;
3948 /* Convert c ? a : a into "a". */
3949 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
3952 /* Convert a != b ? a : b into "a". */
3953 if (GET_CODE (op0
) == NE
3954 && ! side_effects_p (op0
)
3955 && ! HONOR_NANS (mode
)
3956 && ! HONOR_SIGNED_ZEROS (mode
)
3957 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3958 && rtx_equal_p (XEXP (op0
, 1), op2
))
3959 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3960 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3963 /* Convert a == b ? a : b into "b". */
3964 if (GET_CODE (op0
) == EQ
3965 && ! side_effects_p (op0
)
3966 && ! HONOR_NANS (mode
)
3967 && ! HONOR_SIGNED_ZEROS (mode
)
3968 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3969 && rtx_equal_p (XEXP (op0
, 1), op2
))
3970 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3971 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3974 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
3976 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
3977 ? GET_MODE (XEXP (op0
, 1))
3978 : GET_MODE (XEXP (op0
, 0)));
3981 /* Look for happy constants in op1 and op2. */
3982 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
3984 HOST_WIDE_INT t
= INTVAL (op1
);
3985 HOST_WIDE_INT f
= INTVAL (op2
);
3987 if (t
== STORE_FLAG_VALUE
&& f
== 0)
3988 code
= GET_CODE (op0
);
3989 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
3992 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4000 return simplify_gen_relational (code
, mode
, cmp_mode
,
4001 XEXP (op0
, 0), XEXP (op0
, 1));
4004 if (cmp_mode
== VOIDmode
)
4005 cmp_mode
= op0_mode
;
4006 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4007 cmp_mode
, XEXP (op0
, 0),
4010 /* See if any simplifications were possible. */
4013 if (GET_CODE (temp
) == CONST_INT
)
4014 return temp
== const0_rtx
? op2
: op1
;
4016 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4022 gcc_assert (GET_MODE (op0
) == mode
);
4023 gcc_assert (GET_MODE (op1
) == mode
);
4024 gcc_assert (VECTOR_MODE_P (mode
));
4025 op2
= avoid_constant_pool_reference (op2
);
4026 if (GET_CODE (op2
) == CONST_INT
)
4028 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4029 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4030 int mask
= (1 << n_elts
) - 1;
4032 if (!(INTVAL (op2
) & mask
))
4034 if ((INTVAL (op2
) & mask
) == mask
)
4037 op0
= avoid_constant_pool_reference (op0
);
4038 op1
= avoid_constant_pool_reference (op1
);
4039 if (GET_CODE (op0
) == CONST_VECTOR
4040 && GET_CODE (op1
) == CONST_VECTOR
)
4042 rtvec v
= rtvec_alloc (n_elts
);
4045 for (i
= 0; i
< n_elts
; i
++)
4046 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4047 ? CONST_VECTOR_ELT (op0
, i
)
4048 : CONST_VECTOR_ELT (op1
, i
));
4049 return gen_rtx_CONST_VECTOR (mode
, v
);
4061 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4062 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4064 Works by unpacking OP into a collection of 8-bit values
4065 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4066 and then repacking them again for OUTERMODE. */
4069 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
4070 enum machine_mode innermode
, unsigned int byte
)
4072 /* We support up to 512-bit values (for V8DFmode). */
4076 value_mask
= (1 << value_bit
) - 1
4078 unsigned char value
[max_bitsize
/ value_bit
];
4087 rtvec result_v
= NULL
;
4088 enum mode_class outer_class
;
4089 enum machine_mode outer_submode
;
4091 /* Some ports misuse CCmode. */
4092 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
4095 /* We have no way to represent a complex constant at the rtl level. */
4096 if (COMPLEX_MODE_P (outermode
))
4099 /* Unpack the value. */
4101 if (GET_CODE (op
) == CONST_VECTOR
)
4103 num_elem
= CONST_VECTOR_NUNITS (op
);
4104 elems
= &CONST_VECTOR_ELT (op
, 0);
4105 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4111 elem_bitsize
= max_bitsize
;
4113 /* If this asserts, it is too complicated; reducing value_bit may help. */
4114 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4115 /* I don't know how to handle endianness of sub-units. */
4116 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
4118 for (elem
= 0; elem
< num_elem
; elem
++)
4121 rtx el
= elems
[elem
];
4123 /* Vectors are kept in target memory order. (This is probably
4126 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4127 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4129 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4130 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4131 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4132 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4133 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4136 switch (GET_CODE (el
))
4140 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4142 *vp
++ = INTVAL (el
) >> i
;
4143 /* CONST_INTs are always logically sign-extended. */
4144 for (; i
< elem_bitsize
; i
+= value_bit
)
4145 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
4149 if (GET_MODE (el
) == VOIDmode
)
4151 /* If this triggers, someone should have generated a
4152 CONST_INT instead. */
4153 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
4155 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
4156 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
4157 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
4160 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
4163 /* It shouldn't matter what's done here, so fill it with
4165 for (; i
< elem_bitsize
; i
+= value_bit
)
4170 long tmp
[max_bitsize
/ 32];
4171 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
4173 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
4174 gcc_assert (bitsize
<= elem_bitsize
);
4175 gcc_assert (bitsize
% value_bit
== 0);
4177 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
4180 /* real_to_target produces its result in words affected by
4181 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4182 and use WORDS_BIG_ENDIAN instead; see the documentation
4183 of SUBREG in rtl.texi. */
4184 for (i
= 0; i
< bitsize
; i
+= value_bit
)
4187 if (WORDS_BIG_ENDIAN
)
4188 ibase
= bitsize
- 1 - i
;
4191 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
4194 /* It shouldn't matter what's done here, so fill it with
4196 for (; i
< elem_bitsize
; i
+= value_bit
)
4206 /* Now, pick the right byte to start with. */
4207 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4208 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4209 will already have offset 0. */
4210 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
4212 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
4214 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4215 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4216 byte
= (subword_byte
% UNITS_PER_WORD
4217 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4220 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4221 so if it's become negative it will instead be very large.) */
4222 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4224 /* Convert from bytes to chunks of size value_bit. */
4225 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
4227 /* Re-pack the value. */
4229 if (VECTOR_MODE_P (outermode
))
4231 num_elem
= GET_MODE_NUNITS (outermode
);
4232 result_v
= rtvec_alloc (num_elem
);
4233 elems
= &RTVEC_ELT (result_v
, 0);
4234 outer_submode
= GET_MODE_INNER (outermode
);
4240 outer_submode
= outermode
;
4243 outer_class
= GET_MODE_CLASS (outer_submode
);
4244 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
4246 gcc_assert (elem_bitsize
% value_bit
== 0);
4247 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
4249 for (elem
= 0; elem
< num_elem
; elem
++)
4253 /* Vectors are stored in target memory order. (This is probably
4256 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4257 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4259 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4260 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4261 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4262 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4263 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4266 switch (outer_class
)
4269 case MODE_PARTIAL_INT
:
4271 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
4274 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4276 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
4277 for (; i
< elem_bitsize
; i
+= value_bit
)
4278 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
4279 << (i
- HOST_BITS_PER_WIDE_INT
));
4281 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4283 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
4284 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
4285 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
4286 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
4293 case MODE_DECIMAL_FLOAT
:
4296 long tmp
[max_bitsize
/ 32];
4298 /* real_from_target wants its input in words affected by
4299 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4300 and use WORDS_BIG_ENDIAN instead; see the documentation
4301 of SUBREG in rtl.texi. */
4302 for (i
= 0; i
< max_bitsize
/ 32; i
++)
4304 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
4307 if (WORDS_BIG_ENDIAN
)
4308 ibase
= elem_bitsize
- 1 - i
;
4311 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
4314 real_from_target (&r
, tmp
, outer_submode
);
4315 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
4323 if (VECTOR_MODE_P (outermode
))
4324 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
4329 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4330 Return 0 if no simplifications are possible. */
4332 simplify_subreg (enum machine_mode outermode
, rtx op
,
4333 enum machine_mode innermode
, unsigned int byte
)
4335 /* Little bit of sanity checking. */
4336 gcc_assert (innermode
!= VOIDmode
);
4337 gcc_assert (outermode
!= VOIDmode
);
4338 gcc_assert (innermode
!= BLKmode
);
4339 gcc_assert (outermode
!= BLKmode
);
4341 gcc_assert (GET_MODE (op
) == innermode
4342 || GET_MODE (op
) == VOIDmode
);
4344 gcc_assert ((byte
% GET_MODE_SIZE (outermode
)) == 0);
4345 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4347 if (outermode
== innermode
&& !byte
)
4350 if (GET_CODE (op
) == CONST_INT
4351 || GET_CODE (op
) == CONST_DOUBLE
4352 || GET_CODE (op
) == CONST_VECTOR
)
4353 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
4355 /* Changing mode twice with SUBREG => just change it once,
4356 or not at all if changing back op starting mode. */
4357 if (GET_CODE (op
) == SUBREG
)
4359 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
4360 int final_offset
= byte
+ SUBREG_BYTE (op
);
4363 if (outermode
== innermostmode
4364 && byte
== 0 && SUBREG_BYTE (op
) == 0)
4365 return SUBREG_REG (op
);
4367 /* The SUBREG_BYTE represents offset, as if the value were stored
4368 in memory. Irritating exception is paradoxical subreg, where
4369 we define SUBREG_BYTE to be 0. On big endian machines, this
4370 value should be negative. For a moment, undo this exception. */
4371 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
4373 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
4374 if (WORDS_BIG_ENDIAN
)
4375 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
4376 if (BYTES_BIG_ENDIAN
)
4377 final_offset
+= difference
% UNITS_PER_WORD
;
4379 if (SUBREG_BYTE (op
) == 0
4380 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
4382 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
4383 if (WORDS_BIG_ENDIAN
)
4384 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
4385 if (BYTES_BIG_ENDIAN
)
4386 final_offset
+= difference
% UNITS_PER_WORD
;
4389 /* See whether resulting subreg will be paradoxical. */
4390 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
4392 /* In nonparadoxical subregs we can't handle negative offsets. */
4393 if (final_offset
< 0)
4395 /* Bail out in case resulting subreg would be incorrect. */
4396 if (final_offset
% GET_MODE_SIZE (outermode
)
4397 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
4403 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
4405 /* In paradoxical subreg, see if we are still looking on lower part.
4406 If so, our SUBREG_BYTE will be 0. */
4407 if (WORDS_BIG_ENDIAN
)
4408 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
4409 if (BYTES_BIG_ENDIAN
)
4410 offset
+= difference
% UNITS_PER_WORD
;
4411 if (offset
== final_offset
)
4417 /* Recurse for further possible simplifications. */
4418 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
4422 if (validate_subreg (outermode
, innermostmode
,
4423 SUBREG_REG (op
), final_offset
))
4424 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);
      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate the original regno.  We don't have any way to
             specify an offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
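  /* For example, (subreg:SI (reg:DI 2) 0) on a hard register typically
     folds to a single-word (reg:SI n), where n is computed by
     subreg_regno_offset for the target's endianness, provided SImode is
     valid for that register; otherwise the SUBREG is left alone.  */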
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
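  /* For example, (subreg:QI (mem:SI A) 3) becomes a QImode memory
     reference whose address is offset by 3 bytes via adjust_address_nv;
     the narrowing is safe because the new reference covers a subset of
     the bytes already accessed.  */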
  /* Handle complex values represented as a CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
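  /* For example, with OP = (concat:SC A B), a BYTE of 0 selects the real
     part A and a BYTE of GET_MODE_UNIT_SIZE (SCmode) selects the imaginary
     part B; the subreg then recurses into the chosen part.  */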
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
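  /* For instance, the lowpart (subreg:QI (zero_extend:SI (reg:QI x)) 0)
     is just (reg:QI x), while on a little-endian target something like
     (subreg:HI (zero_extend:DI (reg:QI x)) 4) reads only bits above the
     extended source and folds to the zero constant of OUTERMODE.  */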
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));

    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))