/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
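
/* Illustrative note, not part of the original file: on a host with a
   64-bit HOST_WIDE_INT, the pair representation of -5 is
   (low, high) = (0xfffffffffffffffb, -1).  Given only the low word,
   HWI_SIGN_EXTEND (0xfffffffffffffffb) recovers the -1 high word,
   while HWI_SIGN_EXTEND (5) yields 0.  */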
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
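
/* Hypothetical example, not from the original sources: for 32-bit SImode
   on a host whose HOST_WIDE_INT is at least 32 bits wide,
   mode_signbit_p (SImode, GEN_INT (0x80000000)) is true because, after
   masking to the mode width, only bit 31 is set; constants such as
   0x40000000 or 0x80000001 are rejected.  */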
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
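
/* Usage sketch (hypothetical caller, not part of this file):

     rtx sum = simplify_gen_binary (PLUS, SImode, reg, GEN_INT (4));

   If the operation folds (e.g. both operands are constants) the folded
   rtx is returned; otherwise a fresh (plus:SI reg (const_int 4)) is
   built, with any constant operand already swapped into second place
   because PLUS is commutative arithmetic.  */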
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}

/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
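
/* Illustrative example (hedged, not from the original sources): for
   (mem:SF (symbol_ref ("*.LC0"))) whose constant-pool entry holds the
   SFmode value 1.0, avoid_constant_pool_reference returns the
   CONST_DOUBLE for 1.0, so constant_pool_reference_p answers true
   because the returned rtx differs from the MEM that was passed in.  */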
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
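
/* Usage sketch (hypothetical, not part of this file): a comparison whose
   result is wanted in SImode but performed on DImode operands could be
   built as

     rtx cond = simplify_gen_relational (EQ, SImode, DImode, op0, op1);

   When the comparison can be decided at compile time the call folds to a
   constant instead of emitting an EQ rtx.  */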
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
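
/* Illustrative example (not from the original sources): substituting
   (const_int 8) for pseudo R inside (plus:SI (reg:SI R) (const_int -8))
   with simplify_replace_rtx rebuilds the PLUS through simplify_gen_binary,
   which folds the whole expression down to (const_int 0).  */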
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
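
/* Illustrative example (hedged): simplify_unary_operation (NEG, SImode,
   GEN_INT (5), SImode) folds through the constant path to (const_int -5),
   whereas a non-constant operand such as (not:SI (reg:SI R)) falls through
   to simplify_unary_operation_1, which rewrites (neg (not X)) as
   (plus X 1).  */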
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx)
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
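
      /* Worked example for the LT transformation above (illustration only,
	 not from the original sources): with STORE_FLAG_VALUE == 1 and a
	 32-bit inner mode, (neg:SI (lt:SI (reg:SI R) (const_int 0))) becomes
	 (ashiftrt:SI (reg:SI R) (const_int 31)); the arithmetic shift smears
	 the sign bit, giving -1 exactly when R is negative and 0 otherwise,
	 which matches the negated LT result.  */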
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
								  0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
	 */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>)  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>)  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (code, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
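
/* Illustrative example (hedged, not from the original sources): for
   (zero_extend:SI (subreg:QI (reg:SI R) 0)) where the subreg is marked
   SUBREG_PROMOTED_VAR_P with an unsigned promotion to SImode, the
   extension is already implicit, so the SImode operand underneath can be
   returned directly.  */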
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */
  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  break;
	}

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  break;
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  break;
	}
      return immed_double_const (xl, xh, mode);
    }
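
/* Worked example for the FIX bounds test above (illustration only, not
   from the original sources): for a 32-bit target mode the signed upper
   bound built here is 0x7fffffff, so folding
   (fix:SI (const_double:DF 3.0e9)) saturates to that bound instead of
   wrapping, mirroring the middle-end's constant-folding semantics.  */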
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
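
/* Usage sketch (hypothetical caller, not part of this file):

     rtx r = simplify_binary_operation (PLUS, SImode,
					GEN_INT (2), GEN_INT (3));

   yields (const_int 5); when no folding applies the function returns
   NULL_RTX so the caller can build the operation itself, typically via
   simplify_gen_binary.  */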
1475 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1476 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1477 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1478 actual constants. */
1481 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
1482 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1484 rtx tem
, reversed
, opleft
, opright
;
1486 unsigned int width
= GET_MODE_BITSIZE (mode
);
1488 /* Even if we can't compute a constant result,
1489 there are some cases worth simplifying. */
1494 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1495 when x is NaN, infinite, or finite and nonzero. They aren't
1496 when x is -0 and the rounding mode is not towards -infinity,
1497 since (-0) + 0 is then 0. */
1498 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1501 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1502 transformations are safe even for IEEE. */
1503 if (GET_CODE (op0
) == NEG
)
1504 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1505 else if (GET_CODE (op1
) == NEG
)
1506 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1508 /* (~a) + 1 -> -a */
1509 if (INTEGRAL_MODE_P (mode
)
1510 && GET_CODE (op0
) == NOT
1511 && trueop1
== const1_rtx
)
1512 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1514 /* Handle both-operands-constant cases. We can only add
1515 CONST_INTs to constants since the sum of relocatable symbols
1516 can't be handled by most assemblers. Don't add CONST_INT
1517 to CONST_INT since overflow won't be computed properly if wider
1518 than HOST_BITS_PER_WIDE_INT. */
1520 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1521 && GET_CODE (op1
) == CONST_INT
)
1522 return plus_constant (op0
, INTVAL (op1
));
1523 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1524 && GET_CODE (op0
) == CONST_INT
)
1525 return plus_constant (op1
, INTVAL (op0
));
1527 /* See if this is something like X * C - X or vice versa or
1528 if the multiplication is written as a shift. If so, we can
1529 distribute and make a new multiply, shift, or maybe just
1530 have X (if C is 2 in the example above). But don't make
1531 something more expensive than we had before. */
1533 if (SCALAR_INT_MODE_P (mode
))
1535 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1536 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1537 rtx lhs
= op0
, rhs
= op1
;
1539 if (GET_CODE (lhs
) == NEG
)
1543 lhs
= XEXP (lhs
, 0);
1545 else if (GET_CODE (lhs
) == MULT
1546 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1548 coeff0l
= INTVAL (XEXP (lhs
, 1));
1549 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1550 lhs
= XEXP (lhs
, 0);
1552 else if (GET_CODE (lhs
) == ASHIFT
1553 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1554 && INTVAL (XEXP (lhs
, 1)) >= 0
1555 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1557 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1559 lhs
= XEXP (lhs
, 0);
1562 if (GET_CODE (rhs
) == NEG
)
1566 rhs
= XEXP (rhs
, 0);
1568 else if (GET_CODE (rhs
) == MULT
1569 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1571 coeff1l
= INTVAL (XEXP (rhs
, 1));
1572 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1573 rhs
= XEXP (rhs
, 0);
1575 else if (GET_CODE (rhs
) == ASHIFT
1576 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1577 && INTVAL (XEXP (rhs
, 1)) >= 0
1578 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1580 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1582 rhs
= XEXP (rhs
, 0);
1585 if (rtx_equal_p (lhs
, rhs
))
1587 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1589 unsigned HOST_WIDE_INT l
;
1592 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1593 coeff
= immed_double_const (l
, h
, mode
);
1595 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1596 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1601 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1602 if ((GET_CODE (op1
) == CONST_INT
1603 || GET_CODE (op1
) == CONST_DOUBLE
)
1604 && GET_CODE (op0
) == XOR
1605 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1606 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1607 && mode_signbit_p (mode
, op1
))
1608 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1609 simplify_gen_binary (XOR
, mode
, op1
,
1612 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1613 if (GET_CODE (op0
) == MULT
1614 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1618 in1
= XEXP (XEXP (op0
, 0), 0);
1619 in2
= XEXP (op0
, 1);
1620 return simplify_gen_binary (MINUS
, mode
, op1
,
1621 simplify_gen_binary (MULT
, mode
,
1625 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1626 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1628 if (COMPARISON_P (op0
)
1629 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1630 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1631 && (reversed
= reversed_comparison (op0
, mode
)))
1633 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1635 /* If one of the operands is a PLUS or a MINUS, see if we can
1636 simplify this by the associative law.
1637 Don't use the associative law for floating point.
1638 The inaccuracy makes it nonassociative,
1639 and subtle programs can break if operations are associated. */
1641 if (INTEGRAL_MODE_P (mode
)
1642 && (plus_minus_operand_p (op0
)
1643 || plus_minus_operand_p (op1
))
1644 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1647 /* Reassociate floating point addition only when the user
1648 specifies unsafe math optimizations. */
1649 if (FLOAT_MODE_P (mode
)
1650 && flag_unsafe_math_optimizations
)
1652 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1660 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1661 using cc0, in which case we want to leave it as a COMPARE
1662 so we can distinguish it from a register-register-copy.
1664 In IEEE floating point, x-0 is not the same as x. */
1666 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1667 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1668 && trueop1
== CONST0_RTX (mode
))
1672 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1673 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1674 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1675 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1677 rtx xop00
= XEXP (op0
, 0);
1678 rtx xop10
= XEXP (op1
, 0);
1681 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1683 if (REG_P (xop00
) && REG_P (xop10
)
1684 && GET_MODE (xop00
) == GET_MODE (xop10
)
1685 && REGNO (xop00
) == REGNO (xop10
)
1686 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1687 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1694 /* We can't assume x-x is 0 even with non-IEEE floating point,
1695 but since it is zero except in very strange circumstances, we
1696 will treat it as zero with -funsafe-math-optimizations. */
1697 if (rtx_equal_p (trueop0
, trueop1
)
1698 && ! side_effects_p (op0
)
1699 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1700 return CONST0_RTX (mode
);
1702 /* Change subtraction from zero into negation. (0 - x) is the
1703 same as -x when x is NaN, infinite, or finite and nonzero.
1704 But if the mode has signed zeros, and does not round towards
1705 -infinity, then 0 - 0 is 0, not -0. */
1706 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1707 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1709 /* (-1 - a) is ~a. */
1710 if (trueop0
== constm1_rtx
)
1711 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1713 /* Subtracting 0 has no effect unless the mode has signed zeros
1714 and supports rounding towards -infinity. In such a case,
1716 if (!(HONOR_SIGNED_ZEROS (mode
)
1717 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1718 && trueop1
== CONST0_RTX (mode
))
1721 /* See if this is something like X * C - X or vice versa or
1722 if the multiplication is written as a shift. If so, we can
1723 distribute and make a new multiply, shift, or maybe just
1724 have X (if C is 2 in the example above). But don't make
1725 something more expensive than we had before. */
1727 if (SCALAR_INT_MODE_P (mode
))
1729 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1730 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1731 rtx lhs
= op0
, rhs
= op1
;
1733 if (GET_CODE (lhs
) == NEG
)
1737 lhs
= XEXP (lhs
, 0);
1739 else if (GET_CODE (lhs
) == MULT
1740 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1742 coeff0l
= INTVAL (XEXP (lhs
, 1));
1743 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1744 lhs
= XEXP (lhs
, 0);
1746 else if (GET_CODE (lhs
) == ASHIFT
1747 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1748 && INTVAL (XEXP (lhs
, 1)) >= 0
1749 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1751 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1753 lhs
= XEXP (lhs
, 0);
1756 if (GET_CODE (rhs
) == NEG
)
1760 rhs
= XEXP (rhs
, 0);
1762 else if (GET_CODE (rhs
) == MULT
1763 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1765 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1766 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1767 rhs
= XEXP (rhs
, 0);
1769 else if (GET_CODE (rhs
) == ASHIFT
1770 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1771 && INTVAL (XEXP (rhs
, 1)) >= 0
1772 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1774 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1776 rhs
= XEXP (rhs
, 0);
1779 if (rtx_equal_p (lhs
, rhs
))
1781 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1783 unsigned HOST_WIDE_INT l
;
1786 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1787 coeff
= immed_double_const (l
, h
, mode
);
1789 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1790 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1795 /* (a - (-b)) -> (a + b). True even for IEEE. */
1796 if (GET_CODE (op1
) == NEG
)
1797 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1799 /* (-x - c) may be simplified as (-c - x). */
1800 if (GET_CODE (op0
) == NEG
1801 && (GET_CODE (op1
) == CONST_INT
1802 || GET_CODE (op1
) == CONST_DOUBLE
))
1804 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1806 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1809 /* Don't let a relocatable value get a negative coeff. */
1810 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1811 return simplify_gen_binary (PLUS
, mode
,
1813 neg_const_int (mode
, op1
));
1815 /* (x - (x & y)) -> (x & ~y) */
1816 if (GET_CODE (op1
) == AND
)
1818 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1820 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1821 GET_MODE (XEXP (op1
, 1)));
1822 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1824 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1826 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1827 GET_MODE (XEXP (op1
, 0)));
1828 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1832 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1833 by reversing the comparison code if valid. */
1834 if (STORE_FLAG_VALUE
== 1
1835 && trueop0
== const1_rtx
1836 && COMPARISON_P (op1
)
1837 && (reversed
= reversed_comparison (op1
, mode
)))
1840 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1841 if (GET_CODE (op1
) == MULT
1842 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1846 in1
= XEXP (XEXP (op1
, 0), 0);
1847 in2
= XEXP (op1
, 1);
1848 return simplify_gen_binary (PLUS
, mode
,
1849 simplify_gen_binary (MULT
, mode
,
1854 /* Canonicalize (minus (neg A) (mult B C)) to
1855 (minus (mult (neg B) C) A). */
1856 if (GET_CODE (op1
) == MULT
1857 && GET_CODE (op0
) == NEG
)
1861 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1862 in2
= XEXP (op1
, 1);
1863 return simplify_gen_binary (MINUS
, mode
,
1864 simplify_gen_binary (MULT
, mode
,
1869 /* If one of the operands is a PLUS or a MINUS, see if we can
1870 simplify this by the associative law. This will, for example,
1871 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1872 Don't use the associative law for floating point.
1873 The inaccuracy makes it nonassociative,
1874 and subtle programs can break if operations are associated. */
1876 if (INTEGRAL_MODE_P (mode
)
1877 && (plus_minus_operand_p (op0
)
1878 || plus_minus_operand_p (op1
))
1879 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1884 if (trueop1
== constm1_rtx
)
1885 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1887 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1888 x is NaN, since x * 0 is then also NaN. Nor is it valid
1889 when the mode has signed zeros, since multiplying a negative
1890 number by 0 will give -0, not 0. */
1891 if (!HONOR_NANS (mode
)
1892 && !HONOR_SIGNED_ZEROS (mode
)
1893 && trueop1
== CONST0_RTX (mode
)
1894 && ! side_effects_p (op0
))
1897 /* In IEEE floating point, x*1 is not equivalent to x for
1899 if (!HONOR_SNANS (mode
)
1900 && trueop1
== CONST1_RTX (mode
))
1903 /* Convert multiply by constant power of two into shift unless
1904 we are still generating RTL. This test is a kludge. */
1905 if (GET_CODE (trueop1
) == CONST_INT
1906 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1907 /* If the mode is larger than the host word size, and the
1908 uppermost bit is set, then this isn't a power of two due
1909 to implicit sign extension. */
1910 && (width
<= HOST_BITS_PER_WIDE_INT
1911 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1912 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1914 /* Likewise for multipliers wider than a word. */
1915 if (GET_CODE (trueop1
) == CONST_DOUBLE
1916 && (GET_MODE (trueop1
) == VOIDmode
1917 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1918 && GET_MODE (op0
) == mode
1919 && CONST_DOUBLE_LOW (trueop1
) == 0
1920 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
1921 return simplify_gen_binary (ASHIFT
, mode
, op0
,
1922 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
1924 /* x*2 is x+x and x*(-1) is -x */
1925 if (GET_CODE (trueop1
) == CONST_DOUBLE
1926 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
1927 && GET_MODE (op0
) == mode
)
1930 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1932 if (REAL_VALUES_EQUAL (d
, dconst2
))
1933 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1935 if (!HONOR_SNANS (mode
)
1936 && REAL_VALUES_EQUAL (d
, dconstm1
))
1937 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1940 /* Optimize -x * -x as x * x. */
1941 if (FLOAT_MODE_P (mode
)
1942 && GET_CODE (op0
) == NEG
1943 && GET_CODE (op1
) == NEG
1944 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
1945 && !side_effects_p (XEXP (op0
, 0)))
1946 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1948 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1949 if (SCALAR_FLOAT_MODE_P (mode
)
1950 && GET_CODE (op0
) == ABS
1951 && GET_CODE (op1
) == ABS
1952 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
1953 && !side_effects_p (XEXP (op0
, 0)))
1954 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1956 /* Reassociate multiplication, but for floating point MULTs
1957 only when the user specifies unsafe math optimizations. */
1958 if (! FLOAT_MODE_P (mode
)
1959 || flag_unsafe_math_optimizations
)
1961 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1968 if (trueop1
== const0_rtx
)
1970 if (GET_CODE (trueop1
) == CONST_INT
1971 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1972 == GET_MODE_MASK (mode
)))
1974 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1976 /* A | (~A) -> -1 */
1977 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1978 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1979 && ! side_effects_p (op0
)
1980 && SCALAR_INT_MODE_P (mode
))
1983 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1984 if (GET_CODE (op1
) == CONST_INT
1985 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1986 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
1989 /* Convert (A & B) | A to A. */
1990 if (GET_CODE (op0
) == AND
1991 && (rtx_equal_p (XEXP (op0
, 0), op1
)
1992 || rtx_equal_p (XEXP (op0
, 1), op1
))
1993 && ! side_effects_p (XEXP (op0
, 0))
1994 && ! side_effects_p (XEXP (op0
, 1)))
1997 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1998 mode size to (rotate A CX). */
2000 if (GET_CODE (op1
) == ASHIFT
2001 || GET_CODE (op1
) == SUBREG
)
2012 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2013 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2014 && GET_CODE (XEXP (opleft
, 1)) == CONST_INT
2015 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2016 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2017 == GET_MODE_BITSIZE (mode
)))
2018 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2020 /* Same, but for ashift that has been "simplified" to a wider mode
2021 by simplify_shift_const. */
2023 if (GET_CODE (opleft
) == SUBREG
2024 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2025 && GET_CODE (opright
) == LSHIFTRT
2026 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2027 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2028 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2029 && (GET_MODE_SIZE (GET_MODE (opleft
))
2030 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2031 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2032 SUBREG_REG (XEXP (opright
, 0)))
2033 && GET_CODE (XEXP (SUBREG_REG (opleft
), 1)) == CONST_INT
2034 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2035 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2036 == GET_MODE_BITSIZE (mode
)))
2037 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2038 XEXP (SUBREG_REG (opleft
), 1));
2040 /* If we have (ior (and (X C1) C2)), simplify this by making
2041 C1 as small as possible if C1 actually changes. */
2042 if (GET_CODE (op1
) == CONST_INT
2043 && (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2044 || INTVAL (op1
) > 0)
2045 && GET_CODE (op0
) == AND
2046 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2047 && GET_CODE (op1
) == CONST_INT
2048 && (INTVAL (XEXP (op0
, 1)) & INTVAL (op1
)) != 0)
2049 return simplify_gen_binary (IOR
, mode
,
2051 (AND
, mode
, XEXP (op0
, 0),
2052 GEN_INT (INTVAL (XEXP (op0
, 1))
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
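      /* For instance, assuming nonzero_bits proves the operands use
         disjoint bits, as with (xor (ashift X (const_int 4))
         (and Y (const_int 15))) in QImode, the XOR is equivalent to an
         IOR of the same operands, and the IOR form is what the rotate
         detection above looks for.  */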
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
2268 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2269 ((A & N) + B) & M -> (A + B) & M
2270 Similarly if (N & M) == 0,
2271 ((A | N) + B) & M -> (A + B) & M
2272 and for - instead of + and/or ^ instead of |. */
2273 if (GET_CODE (trueop1
) == CONST_INT
2274 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2275 && ~INTVAL (trueop1
)
2276 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
2277 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2282 pmop
[0] = XEXP (op0
, 0);
2283 pmop
[1] = XEXP (op0
, 1);
2285 for (which
= 0; which
< 2; which
++)
2288 switch (GET_CODE (tem
))
2291 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2292 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
2293 == INTVAL (trueop1
))
2294 pmop
[which
] = XEXP (tem
, 0);
2298 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2299 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
2300 pmop
[which
] = XEXP (tem
, 0);
2307 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2309 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2311 return simplify_gen_binary (code
, mode
, tem
, op1
);
2314 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
2337 /* Handle floating point and integers separately. */
2338 if (SCALAR_FLOAT_MODE_P (mode
))
2340 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2341 safe for modes with NaNs, since 0.0 / 0.0 will then be
2342 NaN rather than 0.0. Nor is it safe for modes with signed
2343 zeros, since dividing 0 by a negative number gives -0.0 */
2344 if (trueop0
== CONST0_RTX (mode
)
2345 && !HONOR_NANS (mode
)
2346 && !HONOR_SIGNED_ZEROS (mode
)
2347 && ! side_effects_p (op1
))
2350 if (trueop1
== CONST1_RTX (mode
)
2351 && !HONOR_SNANS (mode
))
2354 if (GET_CODE (trueop1
) == CONST_DOUBLE
2355 && trueop1
!= CONST0_RTX (mode
))
2358 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2361 if (REAL_VALUES_EQUAL (d
, dconstm1
)
2362 && !HONOR_SNANS (mode
))
2363 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2365 /* Change FP division by a constant into multiplication.
2366 Only do this with -funsafe-math-optimizations. */
2367 if (flag_unsafe_math_optimizations
2368 && !REAL_VALUES_EQUAL (d
, dconst0
))
2370 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
2371 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
2372 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
2378 /* 0/x is 0 (or x&0 if x has side-effects). */
2379 if (trueop0
== CONST0_RTX (mode
))
2381 if (side_effects_p (op1
))
2382 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2386 if (trueop1
== CONST1_RTX (mode
))
2387 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2389 if (trueop1
== constm1_rtx
)
2391 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2392 return simplify_gen_unary (NEG
, mode
, x
, mode
);
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
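      /* For instance, an unsigned modulus by 8 becomes a mask with 7:
         (umod:SI X (const_int 8)) is rewritten as (and:SI X (const_int 7)),
         and e.g. 29 % 8 == 5 == (29 & 7).  The divisor must be an exact
         power of two; exact_log2 returns -1 for anything else, so the
         rewrite is skipped then.  */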
2420 /* 0%x is 0 (or x&0 if x has side-effects). */
2421 if (trueop0
== CONST0_RTX (mode
))
2423 if (side_effects_p (op1
))
2424 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2427 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2428 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
2430 if (side_effects_p (op0
))
2431 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
2432 return CONST0_RTX (mode
);
2439 if (trueop1
== CONST0_RTX (mode
))
2441 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2443 /* Rotating ~0 always results in ~0. */
2444 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
2445 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2446 && ! side_effects_p (op1
))
2452 if (trueop1
== CONST0_RTX (mode
))
2454 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2459 if (trueop1
== CONST0_RTX (mode
))
2461 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && GET_CODE (trueop1) == CONST_INT
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_BITSIZE (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      break;
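      /* For instance, assuming a 32-bit SImode operand on a target whose
         CLZ_DEFINED_VALUE_AT_ZERO yields 32: clz (X) is at most 31 for
         nonzero X and exactly 32 for X == 0, so bit 5 of the count is set
         only in the zero case, and (lshiftrt (clz X) (const_int 5))
         computes the same value as (eq X (const_int 0)) when
         STORE_FLAG_VALUE is 1.  */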
2481 if (width
<= HOST_BITS_PER_WIDE_INT
2482 && GET_CODE (trueop1
) == CONST_INT
2483 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2484 && ! side_effects_p (op0
))
2486 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2488 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2494 if (width
<= HOST_BITS_PER_WIDE_INT
2495 && GET_CODE (trueop1
) == CONST_INT
2496 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2497 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2498 && ! side_effects_p (op0
))
2500 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2502 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2508 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2510 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2512 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2518 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2520 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2522 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2531 /* ??? There are simplifications that can be done. */
2535 if (!VECTOR_MODE_P (mode
))
2537 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2538 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2539 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2540 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2541 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2543 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2544 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2549 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2550 gcc_assert (GET_MODE_INNER (mode
)
2551 == GET_MODE_INNER (GET_MODE (trueop0
)));
2552 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2554 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2556 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2557 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2558 rtvec v
= rtvec_alloc (n_elts
);
2561 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2562 for (i
= 0; i
< n_elts
; i
++)
2564 rtx x
= XVECEXP (trueop1
, 0, i
);
2566 gcc_assert (GET_CODE (x
) == CONST_INT
);
2567 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2571 return gen_rtx_CONST_VECTOR (mode
, v
);
2575 if (XVECLEN (trueop1
, 0) == 1
2576 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2577 && GET_CODE (trueop0
) == VEC_CONCAT
)
2580 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2582 /* Try to find the element in the VEC_CONCAT. */
2583 while (GET_MODE (vec
) != mode
2584 && GET_CODE (vec
) == VEC_CONCAT
)
2586 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2587 if (offset
< vec_size
)
2588 vec
= XEXP (vec
, 0);
2592 vec
= XEXP (vec
, 1);
2594 vec
= avoid_constant_pool_reference (vec
);
2597 if (GET_MODE (vec
) == mode
)
2604 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2605 ? GET_MODE (trueop0
)
2606 : GET_MODE_INNER (mode
));
2607 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2608 ? GET_MODE (trueop1
)
2609 : GET_MODE_INNER (mode
));
2611 gcc_assert (VECTOR_MODE_P (mode
));
2612 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2613 == GET_MODE_SIZE (mode
));
2615 if (VECTOR_MODE_P (op0_mode
))
2616 gcc_assert (GET_MODE_INNER (mode
)
2617 == GET_MODE_INNER (op0_mode
));
2619 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2621 if (VECTOR_MODE_P (op1_mode
))
2622 gcc_assert (GET_MODE_INNER (mode
)
2623 == GET_MODE_INNER (op1_mode
));
2625 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2627 if ((GET_CODE (trueop0
) == CONST_VECTOR
2628 || GET_CODE (trueop0
) == CONST_INT
2629 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2630 && (GET_CODE (trueop1
) == CONST_VECTOR
2631 || GET_CODE (trueop1
) == CONST_INT
2632 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2634 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2635 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2636 rtvec v
= rtvec_alloc (n_elts
);
2638 unsigned in_n_elts
= 1;
2640 if (VECTOR_MODE_P (op0_mode
))
2641 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2642 for (i
= 0; i
< n_elts
; i
++)
2646 if (!VECTOR_MODE_P (op0_mode
))
2647 RTVEC_ELT (v
, i
) = trueop0
;
2649 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2653 if (!VECTOR_MODE_P (op1_mode
))
2654 RTVEC_ELT (v
, i
) = trueop1
;
2656 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2661 return gen_rtx_CONST_VECTOR (mode
, v
);
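          /* For instance, assuming V2SImode with two SImode constant
             operands, (vec_concat:V2SI (const_int 1) (const_int 2)) folds
             directly to the constant vector [1, 2]; a CONST_VECTOR operand
             instead contributes its elements one at a time to the result
             vector built above.  */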
2674 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2677 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2679 unsigned int width
= GET_MODE_BITSIZE (mode
);
2681 if (VECTOR_MODE_P (mode
)
2682 && code
!= VEC_CONCAT
2683 && GET_CODE (op0
) == CONST_VECTOR
2684 && GET_CODE (op1
) == CONST_VECTOR
)
2686 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2687 enum machine_mode op0mode
= GET_MODE (op0
);
2688 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2689 enum machine_mode op1mode
= GET_MODE (op1
);
2690 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2691 rtvec v
= rtvec_alloc (n_elts
);
2694 gcc_assert (op0_n_elts
== n_elts
);
2695 gcc_assert (op1_n_elts
== n_elts
);
2696 for (i
= 0; i
< n_elts
; i
++)
2698 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2699 CONST_VECTOR_ELT (op0
, i
),
2700 CONST_VECTOR_ELT (op1
, i
));
2703 RTVEC_ELT (v
, i
) = x
;
2706 return gen_rtx_CONST_VECTOR (mode
, v
);
2709 if (VECTOR_MODE_P (mode
)
2710 && code
== VEC_CONCAT
2711 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2713 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2714 rtvec v
= rtvec_alloc (n_elts
);
2716 gcc_assert (n_elts
>= 2);
2719 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2720 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2722 RTVEC_ELT (v
, 0) = op0
;
2723 RTVEC_ELT (v
, 1) = op1
;
2727 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2728 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2731 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2732 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2733 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2735 for (i
= 0; i
< op0_n_elts
; ++i
)
2736 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2737 for (i
= 0; i
< op1_n_elts
; ++i
)
2738 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2741 return gen_rtx_CONST_VECTOR (mode
, v
);
2744 if (SCALAR_FLOAT_MODE_P (mode
)
2745 && GET_CODE (op0
) == CONST_DOUBLE
2746 && GET_CODE (op1
) == CONST_DOUBLE
2747 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2758 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2760 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2762 for (i
= 0; i
< 4; i
++)
2779 real_from_target (&r
, tmp0
, mode
);
2780 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2784 REAL_VALUE_TYPE f0
, f1
, value
, result
;
2787 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
2788 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
2789 real_convert (&f0
, mode
, &f0
);
2790 real_convert (&f1
, mode
, &f1
);
2792 if (HONOR_SNANS (mode
)
2793 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
2797 && REAL_VALUES_EQUAL (f1
, dconst0
)
2798 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
2801 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2802 && flag_trapping_math
2803 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
2805 int s0
= REAL_VALUE_NEGATIVE (f0
);
2806 int s1
= REAL_VALUE_NEGATIVE (f1
);
2811 /* Inf + -Inf = NaN plus exception. */
2816 /* Inf - Inf = NaN plus exception. */
2821 /* Inf / Inf = NaN plus exception. */
2828 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2829 && flag_trapping_math
2830 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
2831 || (REAL_VALUE_ISINF (f1
)
2832 && REAL_VALUES_EQUAL (f0
, dconst0
))))
2833 /* Inf * 0 = NaN plus exception. */
2836 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
2838 real_convert (&result
, mode
, &value
);
2840 /* Don't constant fold this floating point operation if
2841 the result has overflowed and flag_trapping_math. */
2843 if (flag_trapping_math
2844 && MODE_HAS_INFINITIES (mode
)
2845 && REAL_VALUE_ISINF (result
)
2846 && !REAL_VALUE_ISINF (f0
)
2847 && !REAL_VALUE_ISINF (f1
))
2848 /* Overflow plus exception. */
          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */
2856 if ((flag_rounding_math
2857 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
2858 && !flag_unsafe_math_optimizations
))
2859 && (inexact
|| !real_identical (&result
, &value
)))
2862 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
2866 /* We can fold some multi-word operations. */
2867 if (GET_MODE_CLASS (mode
) == MODE_INT
2868 && width
== HOST_BITS_PER_WIDE_INT
* 2
2869 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2870 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2872 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2873 HOST_WIDE_INT h1
, h2
, hv
, ht
;
2875 if (GET_CODE (op0
) == CONST_DOUBLE
)
2876 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
2878 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
2880 if (GET_CODE (op1
) == CONST_DOUBLE
)
2881 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
2883 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
2888 /* A - B == A + (-B). */
2889 neg_double (l2
, h2
, &lv
, &hv
);
2892 /* Fall through.... */
2895 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2899 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2903 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2904 &lv
, &hv
, <
, &ht
))
2909 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2910 <
, &ht
, &lv
, &hv
))
2915 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2916 &lv
, &hv
, <
, &ht
))
2921 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2922 <
, &ht
, &lv
, &hv
))
2927 lv
= l1
& l2
, hv
= h1
& h2
;
2931 lv
= l1
| l2
, hv
= h1
| h2
;
2935 lv
= l1
^ l2
, hv
= h1
^ h2
;
2941 && ((unsigned HOST_WIDE_INT
) l1
2942 < (unsigned HOST_WIDE_INT
) l2
)))
2951 && ((unsigned HOST_WIDE_INT
) l1
2952 > (unsigned HOST_WIDE_INT
) l2
)))
2959 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
2961 && ((unsigned HOST_WIDE_INT
) l1
2962 < (unsigned HOST_WIDE_INT
) l2
)))
2969 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
2971 && ((unsigned HOST_WIDE_INT
) l1
2972 > (unsigned HOST_WIDE_INT
) l2
)))
2978 case LSHIFTRT
: case ASHIFTRT
:
2980 case ROTATE
: case ROTATERT
:
2981 if (SHIFT_COUNT_TRUNCATED
)
2982 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
2984 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
2987 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
2988 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
2990 else if (code
== ASHIFT
)
2991 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
2992 else if (code
== ROTATE
)
2993 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2994 else /* code == ROTATERT */
2995 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3002 return immed_double_const (lv
, hv
, mode
);
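          /* For instance, assuming a 32-bit HOST_WIDE_INT and a DImode
             addition, each operand is split into a (low, high) pair of
             host words: adding 0x00000001ffffffff and 1 gives
             l1 = 0xffffffff, h1 = 1, l2 = 1, h2 = 0, and add_double
             carries out of the low word to produce lv = 0, hv = 2,
             i.e. the constant 0x0000000200000000.  */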
3005 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3006 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3008 /* Get the integer argument values in two forms:
3009 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3011 arg0
= INTVAL (op0
);
3012 arg1
= INTVAL (op1
);
3014 if (width
< HOST_BITS_PER_WIDE_INT
)
3016 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3017 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3020 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3021 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3024 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3025 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3033 /* Compute the value of the arithmetic. */
3038 val
= arg0s
+ arg1s
;
3042 val
= arg0s
- arg1s
;
3046 val
= arg0s
* arg1s
;
3051 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3054 val
= arg0s
/ arg1s
;
3059 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3062 val
= arg0s
% arg1s
;
3067 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3070 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3075 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3078 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3096 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3097 the value is in range. We can't return any old value for
3098 out-of-range arguments because either the middle-end (via
3099 shift_truncation_mask) or the back-end might be relying on
3100 target-specific knowledge. Nor can we rely on
3101 shift_truncation_mask, since the shift might not be part of an
3102 ashlM3, lshrM3 or ashrM3 instruction. */
3103 if (SHIFT_COUNT_TRUNCATED
)
3104 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3105 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3108 val
= (code
== ASHIFT
3109 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3110 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3112 /* Sign-extend the result for arithmetic right shifts. */
3113 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3114 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3122 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3123 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3131 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3132 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
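          /* For instance, with width = 8, arg0 = 0xb4 and arg1 = 3, the
             rotate-left expression above combines (0xb4 << 3) == 0x5a0
             with (0xb4 >> 5) == 0x05 into 0x5a5, and the gen_int_mode
             call at the end truncates that to the 8-bit result 0xa5.  */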
3136 /* Do nothing here. */
3140 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3144 val
= ((unsigned HOST_WIDE_INT
) arg0
3145 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3149 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3153 val
= ((unsigned HOST_WIDE_INT
) arg0
3154 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3162 /* ??? There are simplifications that can be done. */
3169 return gen_int_mode (val
, mode
);
3177 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3180 Rather than test for specific case, we do this by a brute-force method
3181 and do all possible simplifications until no more changes occur. Then
3182 we rebuild the operation. */
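/* For instance, (plus (minus A B) (minus B C)) is expanded into the
   operand list {A, -B, B, -C}; the pairwise loop below cancels -B
   against B (assuming an integer mode and operands without side
   effects), and the survivors are rebuilt as (minus A C).  */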
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;
  int result;

  result = (commutative_operand_precedence (d2->op)
            - commutative_operand_precedence (d1->op));
  if (result)
    return result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (d1->op) && REG_P (d2->op))
    return REGNO (d1->op) - REGNO (d2->op);
  else
    return 0;
}
3210 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3213 struct simplify_plus_minus_op_data ops
[8];
3215 int n_ops
= 2, input_ops
= 2;
3216 int changed
, n_constants
= 0, canonicalized
= 0;
3219 memset (ops
, 0, sizeof ops
);
3221 /* Set up the two operands and then expand them until nothing has been
3222 changed. If we run out of room in our array, give up; this should
3223 almost never happen. */
3228 ops
[1].neg
= (code
== MINUS
);
3234 for (i
= 0; i
< n_ops
; i
++)
3236 rtx this_op
= ops
[i
].op
;
3237 int this_neg
= ops
[i
].neg
;
3238 enum rtx_code this_code
= GET_CODE (this_op
);
3247 ops
[n_ops
].op
= XEXP (this_op
, 1);
3248 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3251 ops
[i
].op
= XEXP (this_op
, 0);
3254 canonicalized
|= this_neg
;
3258 ops
[i
].op
= XEXP (this_op
, 0);
3259 ops
[i
].neg
= ! this_neg
;
3266 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3267 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3268 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3270 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3271 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3272 ops
[n_ops
].neg
= this_neg
;
3280 /* ~a -> (-a - 1) */
3283 ops
[n_ops
].op
= constm1_rtx
;
3284 ops
[n_ops
++].neg
= this_neg
;
3285 ops
[i
].op
= XEXP (this_op
, 0);
3286 ops
[i
].neg
= !this_neg
;
3296 ops
[i
].op
= neg_const_int (mode
, this_op
);
3310 if (n_constants
> 1)
3313 gcc_assert (n_ops
>= 2);
3315 /* If we only have two operands, we can avoid the loops. */
3318 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3321 /* Get the two operands. Be careful with the order, especially for
3322 the cases where code == MINUS. */
3323 if (ops
[0].neg
&& ops
[1].neg
)
3325 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3328 else if (ops
[0].neg
)
3339 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3342 /* Now simplify each pair of operands until nothing changes. */
3345 /* Insertion sort is good enough for an eight-element array. */
3346 for (i
= 1; i
< n_ops
; i
++)
3348 struct simplify_plus_minus_op_data save
;
3350 if (simplify_plus_minus_op_data_cmp (&ops
[j
], &ops
[i
]) < 0)
3356 ops
[j
+ 1] = ops
[j
];
3357 while (j
-- && simplify_plus_minus_op_data_cmp (&ops
[j
], &save
) > 0);
3361 /* This is only useful the first time through. */
3366 for (i
= n_ops
- 1; i
> 0; i
--)
3367 for (j
= i
- 1; j
>= 0; j
--)
3369 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3370 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3372 if (lhs
!= 0 && rhs
!= 0)
3374 enum rtx_code ncode
= PLUS
;
3380 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3382 else if (swap_commutative_operands_p (lhs
, rhs
))
3383 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3385 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3386 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3388 rtx tem_lhs
, tem_rhs
;
3390 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3391 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3392 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3394 if (tem
&& !CONSTANT_P (tem
))
3395 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3398 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3400 /* Reject "simplifications" that just wrap the two
3401 arguments in a CONST. Failure to do so can result
3402 in infinite recursion with simplify_binary_operation
3403 when it calls us to simplify CONST operations. */
3405 && ! (GET_CODE (tem
) == CONST
3406 && GET_CODE (XEXP (tem
, 0)) == ncode
3407 && XEXP (XEXP (tem
, 0), 0) == lhs
3408 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3411 if (GET_CODE (tem
) == NEG
)
3412 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3413 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3414 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3418 ops
[j
].op
= NULL_RTX
;
3424 /* Pack all the operands to the lower-numbered entries. */
3425 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3435 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3437 && GET_CODE (ops
[1].op
) == CONST_INT
3438 && CONSTANT_P (ops
[0].op
)
3440 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3442 /* We suppressed creation of trivial CONST expressions in the
3443 combination loop to avoid recursion. Create one manually now.
3444 The combination loop should have ensured that there is exactly
3445 one CONST_INT, and the sort will have ensured that it is last
3446 in the array and that any other constant will be next-to-last. */
3449 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3450 && CONSTANT_P (ops
[n_ops
- 2].op
))
3452 rtx value
= ops
[n_ops
- 1].op
;
3453 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3454 value
= neg_const_int (mode
, value
);
3455 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3459 /* Put a non-negated operand first, if possible. */
3461 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3464 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3473 /* Now make the result by performing the requested operations. */
3475 for (i
= 1; i
< n_ops
; i
++)
3476 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3477 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
3503 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3504 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3506 rtx tem
, trueop0
, trueop1
;
3508 if (cmp_mode
== VOIDmode
)
3509 cmp_mode
= GET_MODE (op0
);
3510 if (cmp_mode
== VOIDmode
)
3511 cmp_mode
= GET_MODE (op1
);
3513 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3516 if (SCALAR_FLOAT_MODE_P (mode
))
3518 if (tem
== const0_rtx
)
3519 return CONST0_RTX (mode
);
3520 #ifdef FLOAT_STORE_FLAG_VALUE
3522 REAL_VALUE_TYPE val
;
3523 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3524 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3530 if (VECTOR_MODE_P (mode
))
3532 if (tem
== const0_rtx
)
3533 return CONST0_RTX (mode
);
3534 #ifdef VECTOR_STORE_FLAG_VALUE
3539 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3540 if (val
== NULL_RTX
)
3542 if (val
== const1_rtx
)
3543 return CONST1_RTX (mode
);
3545 units
= GET_MODE_NUNITS (mode
);
3546 v
= rtvec_alloc (units
);
3547 for (i
= 0; i
< units
; i
++)
3548 RTVEC_ELT (v
, i
) = val
;
3549 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3559 /* For the following tests, ensure const0_rtx is op1. */
3560 if (swap_commutative_operands_p (op0
, op1
)
3561 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3562 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3564 /* If op0 is a compare, extract the comparison arguments from it. */
3565 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3566 return simplify_relational_operation (code
, mode
, VOIDmode
,
3567 XEXP (op0
, 0), XEXP (op0
, 1));
3569 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3573 trueop0
= avoid_constant_pool_reference (op0
);
3574 trueop1
= avoid_constant_pool_reference (op1
);
3575 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
3586 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3587 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3589 enum rtx_code op0code
= GET_CODE (op0
);
3591 if (GET_CODE (op1
) == CONST_INT
)
3593 if (INTVAL (op1
) == 0 && COMPARISON_P (op0
))
3595 /* If op0 is a comparison, extract the comparison arguments
3599 if (GET_MODE (op0
) == mode
)
3600 return simplify_rtx (op0
);
3602 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3603 XEXP (op0
, 0), XEXP (op0
, 1));
3605 else if (code
== EQ
)
3607 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3608 if (new_code
!= UNKNOWN
)
3609 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3610 XEXP (op0
, 0), XEXP (op0
, 1));
3615 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3616 if ((code
== EQ
|| code
== NE
)
3617 && (op0code
== PLUS
|| op0code
== MINUS
)
3619 && CONSTANT_P (XEXP (op0
, 1))
3620 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3622 rtx x
= XEXP (op0
, 0);
3623 rtx c
= XEXP (op0
, 1);
3625 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3627 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
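      /* For instance, (eq:SI (plus:SI X (const_int 4)) (const_int 10))
         becomes (eq:SI X (const_int 6)); the constant moves across the
         comparison by computing 10 - 4 above.  For floating-point modes
         the same rewrite is only attempted under
         -funsafe-math-optimizations, per the INTEGRAL_MODE_P test.  */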
3630 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3631 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3633 && op1
== const0_rtx
3634 && GET_MODE_CLASS (mode
) == MODE_INT
3635 && cmp_mode
!= VOIDmode
3636 /* ??? Work-around BImode bugs in the ia64 backend. */
3638 && cmp_mode
!= BImode
3639 && nonzero_bits (op0
, cmp_mode
) == 1
3640 && STORE_FLAG_VALUE
== 1)
3641 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3642 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3643 : lowpart_subreg (mode
, op0
, cmp_mode
);
3645 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3646 if ((code
== EQ
|| code
== NE
)
3647 && op1
== const0_rtx
3649 return simplify_gen_relational (code
, mode
, cmp_mode
,
3650 XEXP (op0
, 0), XEXP (op0
, 1));
3652 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3653 if ((code
== EQ
|| code
== NE
)
3655 && rtx_equal_p (XEXP (op0
, 0), op1
)
3656 && !side_effects_p (XEXP (op0
, 0)))
3657 return simplify_gen_relational (code
, mode
, cmp_mode
,
3658 XEXP (op0
, 1), const0_rtx
);
3660 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3661 if ((code
== EQ
|| code
== NE
)
3663 && rtx_equal_p (XEXP (op0
, 1), op1
)
3664 && !side_effects_p (XEXP (op0
, 1)))
3665 return simplify_gen_relational (code
, mode
, cmp_mode
,
3666 XEXP (op0
, 0), const0_rtx
);
3668 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3669 if ((code
== EQ
|| code
== NE
)
3671 && (GET_CODE (op1
) == CONST_INT
3672 || GET_CODE (op1
) == CONST_DOUBLE
)
3673 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3674 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3675 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3676 simplify_gen_binary (XOR
, cmp_mode
,
3677 XEXP (op0
, 1), op1
));
3682 /* Check if the given comparison (done in the given MODE) is actually a
3683 tautology or a contradiction.
3684 If no simplification is possible, this function returns zero.
3685 Otherwise, it returns either const_true_rtx or const0_rtx. */
3688 simplify_const_relational_operation (enum rtx_code code
,
3689 enum machine_mode mode
,
3692 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3697 gcc_assert (mode
!= VOIDmode
3698 || (GET_MODE (op0
) == VOIDmode
3699 && GET_MODE (op1
) == VOIDmode
));
3701 /* If op0 is a compare, extract the comparison arguments from it. */
3702 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3704 op1
= XEXP (op0
, 1);
3705 op0
= XEXP (op0
, 0);
3707 if (GET_MODE (op0
) != VOIDmode
)
3708 mode
= GET_MODE (op0
);
3709 else if (GET_MODE (op1
) != VOIDmode
)
3710 mode
= GET_MODE (op1
);
3715 /* We can't simplify MODE_CC values since we don't know what the
3716 actual comparison is. */
3717 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3720 /* Make sure the constant is second. */
3721 if (swap_commutative_operands_p (op0
, op1
))
3723 tem
= op0
, op0
= op1
, op1
= tem
;
3724 code
= swap_condition (code
);
3727 trueop0
= avoid_constant_pool_reference (op0
);
3728 trueop1
= avoid_constant_pool_reference (op1
);
3730 /* For integer comparisons of A and B maybe we can simplify A - B and can
3731 then simplify a comparison of that with zero. If A and B are both either
3732 a register or a CONST_INT, this can't help; testing for these cases will
3733 prevent infinite recursion here and speed things up.
3735 We can only do this for EQ and NE comparisons as otherwise we may
3736 lose or introduce overflow which we cannot disregard as undefined as
3737 we do not know the signedness of the operation on either the left or
3738 the right hand side of the comparison. */
3740 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3741 && (code
== EQ
|| code
== NE
)
3742 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3743 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3744 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3745 /* We cannot do this if tem is a nonzero address. */
3746 && ! nonzero_address_p (tem
))
3747 return simplify_const_relational_operation (signed_condition (code
),
3748 mode
, tem
, const0_rtx
);
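  /* For instance, comparing (plus X (const_int 3)) against
     (plus X (const_int 5)) for EQ: their MINUS simplifies to
     (const_int -2), which is a nonzero constant, so the whole comparison
     folds to const0_rtx.  The trick is limited to EQ and NE because the
     subtraction could otherwise overflow and change the ordering.  */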
3750 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
3751 return const_true_rtx
;
3753 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
3756 /* For modes without NaNs, if the two operands are equal, we know the
3757 result except if they have side-effects. */
3758 if (! HONOR_NANS (GET_MODE (trueop0
))
3759 && rtx_equal_p (trueop0
, trueop1
)
3760 && ! side_effects_p (trueop0
))
3761 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3763 /* If the operands are floating-point constants, see if we can fold
3765 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3766 && GET_CODE (trueop1
) == CONST_DOUBLE
3767 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3769 REAL_VALUE_TYPE d0
, d1
;
3771 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3772 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3774 /* Comparisons are unordered iff at least one of the values is NaN. */
3775 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3785 return const_true_rtx
;
3798 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3799 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3800 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3803 /* Otherwise, see if the operands are both integers. */
3804 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3805 && (GET_CODE (trueop0
) == CONST_DOUBLE
3806 || GET_CODE (trueop0
) == CONST_INT
)
3807 && (GET_CODE (trueop1
) == CONST_DOUBLE
3808 || GET_CODE (trueop1
) == CONST_INT
))
3810 int width
= GET_MODE_BITSIZE (mode
);
3811 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3812 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3814 /* Get the two words comprising each integer constant. */
3815 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3817 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3818 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3822 l0u
= l0s
= INTVAL (trueop0
);
3823 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3826 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3828 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3829 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3833 l1u
= l1s
= INTVAL (trueop1
);
3834 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3837 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3838 we have to sign or zero-extend the values. */
3839 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
3841 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3842 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3844 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3845 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3847 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3848 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3850 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
3851 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
3853 equal
= (h0u
== h1u
&& l0u
== l1u
);
3854 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
3855 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
3856 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
3857 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
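      /* For instance, assuming 32-bit host words, the DImode constants
         0x100000000 and 0xffffffff unpack to (h0s, l0u) = (1, 0) and
         (h1s, l1u) = (0, 0xffffffff); the high words differ, so op0lt
         and op0ltu end up 0 while op1lt and op1ltu end up 1, and a GT or
         GTU test of the first constant against the second folds to
         const_true_rtx below.  */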
3860 /* Otherwise, there are some code-specific tests we can make. */
3863 /* Optimize comparisons with upper and lower bounds. */
3864 if (SCALAR_INT_MODE_P (mode
)
3865 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3878 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3885 /* x >= min is always true. */
3886 if (rtx_equal_p (trueop1
, mmin
))
3887 tem
= const_true_rtx
;
3893 /* x <= max is always true. */
3894 if (rtx_equal_p (trueop1
, mmax
))
3895 tem
= const_true_rtx
;
3900 /* x > max is always false. */
3901 if (rtx_equal_p (trueop1
, mmax
))
3907 /* x < min is always false. */
3908 if (rtx_equal_p (trueop1
, mmin
))
3915 if (tem
== const0_rtx
3916 || tem
== const_true_rtx
)
3923 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3928 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3929 return const_true_rtx
;
3933 /* Optimize abs(x) < 0.0. */
3934 if (trueop1
== CONST0_RTX (mode
)
3935 && !HONOR_SNANS (mode
)
3936 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3938 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3940 if (GET_CODE (tem
) == ABS
)
3946 /* Optimize abs(x) >= 0.0. */
3947 if (trueop1
== CONST0_RTX (mode
)
3948 && !HONOR_NANS (mode
)
3949 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3951 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3953 if (GET_CODE (tem
) == ABS
)
3954 return const_true_rtx
;
3959 /* Optimize ! (abs(x) < 0.0). */
3960 if (trueop1
== CONST0_RTX (mode
))
3962 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3964 if (GET_CODE (tem
) == ABS
)
3965 return const_true_rtx
;
3976 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3982 return equal
? const_true_rtx
: const0_rtx
;
3985 return ! equal
? const_true_rtx
: const0_rtx
;
3988 return op0lt
? const_true_rtx
: const0_rtx
;
3991 return op1lt
? const_true_rtx
: const0_rtx
;
3993 return op0ltu
? const_true_rtx
: const0_rtx
;
3995 return op1ltu
? const_true_rtx
: const0_rtx
;
3998 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
4001 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
4003 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
4005 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
4007 return const_true_rtx
;
4015 /* Simplify CODE, an operation with result mode MODE and three operands,
4016 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4017 a constant. Return 0 if no simplifications is possible. */
4020 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
4021 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
4024 unsigned int width
= GET_MODE_BITSIZE (mode
);
4026 /* VOIDmode means "infinite" precision. */
4028 width
= HOST_BITS_PER_WIDE_INT
;
4034 if (GET_CODE (op0
) == CONST_INT
4035 && GET_CODE (op1
) == CONST_INT
4036 && GET_CODE (op2
) == CONST_INT
4037 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
4038 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
4040 /* Extracting a bit-field from a constant */
4041 HOST_WIDE_INT val
= INTVAL (op0
);
4043 if (BITS_BIG_ENDIAN
)
4044 val
>>= (GET_MODE_BITSIZE (op0_mode
)
4045 - INTVAL (op2
) - INTVAL (op1
));
4047 val
>>= INTVAL (op2
);
4049 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4051 /* First zero-extend. */
4052 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4053 /* If desired, propagate sign bit. */
4054 if (code
== SIGN_EXTRACT
4055 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4056 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4059 /* Clear the bits that don't belong in our mode,
4060 unless they and our sign bit are all one.
4061 So we get either a reasonable negative value or a reasonable
4062 unsigned value for this mode. */
4063 if (width
< HOST_BITS_PER_WIDE_INT
4064 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4065 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4066 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4068 return gen_int_mode (val
, mode
);
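        /* For instance, extracting a 4-bit field at bit 2 (op1 = 4,
           op2 = 2) from the constant 0xb4 == 10110100b: the value is
           shifted right by 2 to 0x2d and masked with (1 << 4) - 1, so
           ZERO_EXTRACT yields 13 (1101b), while SIGN_EXTRACT propagates
           the set top bit of the field and yields -3.  This assumes
           BITS_BIG_ENDIAN is 0, so op2 counts from the least significant
           bit.  */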
4073 if (GET_CODE (op0
) == CONST_INT
)
4074 return op0
!= const0_rtx
? op1
: op2
;
4076 /* Convert c ? a : a into "a". */
4077 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
4080 /* Convert a != b ? a : b into "a". */
4081 if (GET_CODE (op0
) == NE
4082 && ! side_effects_p (op0
)
4083 && ! HONOR_NANS (mode
)
4084 && ! HONOR_SIGNED_ZEROS (mode
)
4085 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4086 && rtx_equal_p (XEXP (op0
, 1), op2
))
4087 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4088 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4091 /* Convert a == b ? a : b into "b". */
4092 if (GET_CODE (op0
) == EQ
4093 && ! side_effects_p (op0
)
4094 && ! HONOR_NANS (mode
)
4095 && ! HONOR_SIGNED_ZEROS (mode
)
4096 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4097 && rtx_equal_p (XEXP (op0
, 1), op2
))
4098 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4099 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4102 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
4104 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
4105 ? GET_MODE (XEXP (op0
, 1))
4106 : GET_MODE (XEXP (op0
, 0)));
4109 /* Look for happy constants in op1 and op2. */
4110 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
4112 HOST_WIDE_INT t
= INTVAL (op1
);
4113 HOST_WIDE_INT f
= INTVAL (op2
);
4115 if (t
== STORE_FLAG_VALUE
&& f
== 0)
4116 code
= GET_CODE (op0
);
4117 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
4120 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4128 return simplify_gen_relational (code
, mode
, cmp_mode
,
4129 XEXP (op0
, 0), XEXP (op0
, 1));
4132 if (cmp_mode
== VOIDmode
)
4133 cmp_mode
= op0_mode
;
4134 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4135 cmp_mode
, XEXP (op0
, 0),
4138 /* See if any simplifications were possible. */
4141 if (GET_CODE (temp
) == CONST_INT
)
4142 return temp
== const0_rtx
? op2
: op1
;
4144 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4150 gcc_assert (GET_MODE (op0
) == mode
);
4151 gcc_assert (GET_MODE (op1
) == mode
);
4152 gcc_assert (VECTOR_MODE_P (mode
));
4153 op2
= avoid_constant_pool_reference (op2
);
4154 if (GET_CODE (op2
) == CONST_INT
)
4156 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4157 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4158 int mask
= (1 << n_elts
) - 1;
4160 if (!(INTVAL (op2
) & mask
))
4162 if ((INTVAL (op2
) & mask
) == mask
)
4165 op0
= avoid_constant_pool_reference (op0
);
4166 op1
= avoid_constant_pool_reference (op1
);
4167 if (GET_CODE (op0
) == CONST_VECTOR
4168 && GET_CODE (op1
) == CONST_VECTOR
)
4170 rtvec v
= rtvec_alloc (n_elts
);
4173 for (i
= 0; i
< n_elts
; i
++)
4174 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4175 ? CONST_VECTOR_ELT (op0
, i
)
4176 : CONST_VECTOR_ELT (op1
, i
));
4177 return gen_rtx_CONST_VECTOR (mode
, v
);
4189 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4190 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4192 Works by unpacking OP into a collection of 8-bit values
4193 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4194 and then repacking them again for OUTERMODE. */
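/* For instance, taking (subreg:SI (const_double:DI ...) 4) on a target
   where WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN are both 0: the DImode
   constant is unpacked into eight little-endian byte values, BYTE = 4
   selects the upper four, and repacking them yields a CONST_INT holding
   the high half of the original constant.  */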
4197 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
4198 enum machine_mode innermode
, unsigned int byte
)
4200 /* We support up to 512-bit values (for V8DFmode). */
4204 value_mask
= (1 << value_bit
) - 1
4206 unsigned char value
[max_bitsize
/ value_bit
];
4215 rtvec result_v
= NULL
;
4216 enum mode_class outer_class
;
4217 enum machine_mode outer_submode
;
4219 /* Some ports misuse CCmode. */
4220 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
4223 /* We have no way to represent a complex constant at the rtl level. */
4224 if (COMPLEX_MODE_P (outermode
))
4227 /* Unpack the value. */
4229 if (GET_CODE (op
) == CONST_VECTOR
)
4231 num_elem
= CONST_VECTOR_NUNITS (op
);
4232 elems
= &CONST_VECTOR_ELT (op
, 0);
4233 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4239 elem_bitsize
= max_bitsize
;
4241 /* If this asserts, it is too complicated; reducing value_bit may help. */
4242 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4243 /* I don't know how to handle endianness of sub-units. */
4244 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
4246 for (elem
= 0; elem
< num_elem
; elem
++)
4249 rtx el
= elems
[elem
];
4251 /* Vectors are kept in target memory order. (This is probably
4254 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4255 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4257 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4258 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4259 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4260 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4261 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4264 switch (GET_CODE (el
))
4268 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4270 *vp
++ = INTVAL (el
) >> i
;
4271 /* CONST_INTs are always logically sign-extended. */
4272 for (; i
< elem_bitsize
; i
+= value_bit
)
4273 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
4277 if (GET_MODE (el
) == VOIDmode
)
4279 /* If this triggers, someone should have generated a
4280 CONST_INT instead. */
4281 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
4283 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
4284 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
4285 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
4288 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
4291 /* It shouldn't matter what's done here, so fill it with
4293 for (; i
< elem_bitsize
; i
+= value_bit
)
4298 long tmp
[max_bitsize
/ 32];
4299 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
4301 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
4302 gcc_assert (bitsize
<= elem_bitsize
);
4303 gcc_assert (bitsize
% value_bit
== 0);
4305 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
4308 /* real_to_target produces its result in words affected by
4309 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4310 and use WORDS_BIG_ENDIAN instead; see the documentation
4311 of SUBREG in rtl.texi. */
4312 for (i
= 0; i
< bitsize
; i
+= value_bit
)
4315 if (WORDS_BIG_ENDIAN
)
4316 ibase
= bitsize
- 1 - i
;
4319 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
4322 /* It shouldn't matter what's done here, so fill it with
4324 for (; i
< elem_bitsize
; i
+= value_bit
)
4334 /* Now, pick the right byte to start with. */
4335 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4336 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4337 will already have offset 0. */
4338 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
4340 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
4342 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4343 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4344 byte
= (subword_byte
% UNITS_PER_WORD
4345 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4348 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4349 so if it's become negative it will instead be very large.) */
4350 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4352 /* Convert from bytes to chunks of size value_bit. */
4353 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
4355 /* Re-pack the value. */
4357 if (VECTOR_MODE_P (outermode
))
4359 num_elem
= GET_MODE_NUNITS (outermode
);
4360 result_v
= rtvec_alloc (num_elem
);
4361 elems
= &RTVEC_ELT (result_v
, 0);
4362 outer_submode
= GET_MODE_INNER (outermode
);
4368 outer_submode
= outermode
;
4371 outer_class
= GET_MODE_CLASS (outer_submode
);
4372 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
4374 gcc_assert (elem_bitsize
% value_bit
== 0);
4375 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
4377 for (elem
= 0; elem
< num_elem
; elem
++)
4381 /* Vectors are stored in target memory order. (This is probably
4384 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4385 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4387 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4388 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4389 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4390 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4391 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4394 switch (outer_class
)
4397 case MODE_PARTIAL_INT
:
4399 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
4402 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4404 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
4405 for (; i
< elem_bitsize
; i
+= value_bit
)
4406 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
4407 << (i
- HOST_BITS_PER_WIDE_INT
));
4409 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4411 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
4412 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
4413 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
4414 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
4421 case MODE_DECIMAL_FLOAT
:
4424 long tmp
[max_bitsize
/ 32];
4426 /* real_from_target wants its input in words affected by
4427 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4428 and use WORDS_BIG_ENDIAN instead; see the documentation
4429 of SUBREG in rtl.texi. */
4430 for (i
= 0; i
< max_bitsize
/ 32; i
++)
4432 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
4435 if (WORDS_BIG_ENDIAN
)
4436 ibase
= elem_bitsize
- 1 - i
;
4439 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
4442 real_from_target (&r
, tmp
, outer_submode
);
4443 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
4451 if (VECTOR_MODE_P (outermode
))
4452 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
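  /* For example, on a little-endian target this folds
     (subreg:QI (const_int 0x1234) 0) to (const_int 0x34): the constant's
     bytes are unpacked, the requested byte is selected, and a fresh
     constant rtx is built in OUTERMODE.  */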
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
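  /* For example, (subreg:QI (subreg:HI (reg:SI x) 0) 0) on a little-endian
     target collapses to a single (subreg:QI (reg:SI x) 0); the two offsets
     are combined into FINAL_OFFSET and the intermediate SUBREG drops out.  */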
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
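  /* E.g. a lowpart (subreg:QI (truncate:HI (reg:SI x)) 0) becomes
     (truncate:QI (reg:SI x)); the explicit TRUNCATE absorbs the narrowing
     the SUBREG would otherwise have performed.  */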
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
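  /* adjust_address_nv rewrites the MEM in place of the SUBREG, e.g.
     (subreg:QI (mem:SI (reg:SI p)) 3) becomes
     (mem:QI (plus (reg:SI p) (const_int 3))), as long as the address is
     not mode-dependent and the access is not widened.  */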
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
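  /* E.g. with 4-byte SFmode parts, (subreg:SF (concat:SC re im) 0) picks RE
     and (subreg:SF (concat:SC re im) 4) picks IM directly; offsets that
     straddle the two halves are rejected above.  */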
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source provides.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
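  /* For instance, (subreg:QI (zero_extend:SI (reg:QI x)) 0) is just
     (reg:QI x); (subreg:QI (sign_extend:SI (reg:HI x)) 0) becomes the
     lowpart subreg of (reg:HI x); and (subreg:HI (zero_extend:SI
     (reg:QI x)) 0) becomes (zero_extend:HI (reg:QI x)).  A subreg taken
     entirely from the zero-filled upper part folds to zero.  */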
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
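  /* Concretely, (subreg:QI (ashift:SI (zero_extend:SI (reg:QI x))
     (const_int 3)) 0) becomes (ashift:QI (reg:QI x) (const_int 3)): the
     low QImode bits of the shifted value do not depend on the bits added
     by the extension.  */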
  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
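/* Callers should prefer simplify_gen_subreg over building a SUBREG by hand:
   it first tries every simplification above, so e.g. asking for the QImode
   lowpart of a CONST_INT yields a CONST_INT rather than a SUBREG, and it
   returns NULL_RTX instead of creating a SUBREG it cannot validate.  */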
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))