1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx
neg_const_int (enum machine_mode
, rtx
);
53 static bool plus_minus_operand_p (rtx
);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
56 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
58 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
60 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
61 enum machine_mode
, rtx
, rtx
);
62 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
63 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
69 neg_const_int (enum machine_mode mode
, rtx i
)
71 return gen_int_mode (- INTVAL (i
), mode
);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
78 mode_signbit_p (enum machine_mode mode
, rtx x
)
80 unsigned HOST_WIDE_INT val
;
83 if (GET_MODE_CLASS (mode
) != MODE_INT
)
86 width
= GET_MODE_BITSIZE (mode
);
90 if (width
<= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x
) == CONST_INT
)
93 else if (width
<= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x
) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x
) == 0)
97 val
= CONST_DOUBLE_HIGH (x
);
98 width
-= HOST_BITS_PER_WIDE_INT
;
103 if (width
< HOST_BITS_PER_WIDE_INT
)
104 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
105 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
117 /* If this simplifies, do it. */
118 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0
, op1
))
125 tem
= op0
, op0
= op1
, op1
= tem
;
127 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x
)
136 enum machine_mode cmode
;
137 HOST_WIDE_INT offset
= 0;
139 switch (GET_CODE (x
))
145 /* Handle float extensions of constant pool references. */
147 c
= avoid_constant_pool_reference (tmp
);
148 if (c
!= tmp
&& GET_CODE (c
) == CONST_DOUBLE
)
152 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr
= targetm
.delegitimize_address (addr
);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr
) == CONST
168 && GET_CODE (XEXP (addr
, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr
, 0), 1)) == CONST_INT
)
171 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
172 addr
= XEXP (XEXP (addr
, 0), 0);
175 if (GET_CODE (addr
) == LO_SUM
)
176 addr
= XEXP (addr
, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr
) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr
))
183 c
= get_pool_constant (addr
);
184 cmode
= get_pool_mode (addr
);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset
!= 0 || cmode
!= GET_MODE (x
))
191 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
192 if (tem
&& CONSTANT_P (tem
))
202 /* Return true if X is a MEM referencing the constant pool. */
205 constant_pool_reference_p (rtx x
)
207 return avoid_constant_pool_reference (x
) != x
;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
214 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
215 enum machine_mode op_mode
)
219 /* If this simplifies, use it. */
220 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
223 return gen_rtx_fmt_e (code
, mode
, op
);
226 /* Likewise for ternary operations. */
229 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
230 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
234 /* If this simplifies, use it. */
235 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
239 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
246 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
247 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
251 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
255 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
262 simplify_replace_rtx (rtx x
, rtx old_rtx
, rtx new_rtx
)
264 enum rtx_code code
= GET_CODE (x
);
265 enum machine_mode mode
= GET_MODE (x
);
266 enum machine_mode op_mode
;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
276 switch (GET_RTX_CLASS (code
))
280 op_mode
= GET_MODE (op0
);
281 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
282 if (op0
== XEXP (x
, 0))
284 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
288 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
289 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
290 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
292 return simplify_gen_binary (code
, mode
, op0
, op1
);
295 case RTX_COMM_COMPARE
:
298 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
299 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
300 op1
= simplify_replace_rtx (op1
, old_rtx
, new_rtx
);
301 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
303 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
306 case RTX_BITFIELD_OPS
:
308 op_mode
= GET_MODE (op0
);
309 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
310 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
311 op2
= simplify_replace_rtx (XEXP (x
, 2), old_rtx
, new_rtx
);
312 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
314 if (op_mode
== VOIDmode
)
315 op_mode
= GET_MODE (op0
);
316 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
319 /* The only case we try to handle is a SUBREG. */
322 op0
= simplify_replace_rtx (SUBREG_REG (x
), old_rtx
, new_rtx
);
323 if (op0
== SUBREG_REG (x
))
325 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
326 GET_MODE (SUBREG_REG (x
)),
328 return op0
? op0
: x
;
335 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
336 if (op0
== XEXP (x
, 0))
338 return replace_equiv_address_nv (x
, op0
);
340 else if (code
== LO_SUM
)
342 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
343 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
349 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
351 return gen_rtx_LO_SUM (mode
, op0
, op1
);
353 else if (code
== REG
)
355 if (rtx_equal_p (x
, old_rtx
))
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
370 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
371 rtx op
, enum machine_mode op_mode
)
375 if (GET_CODE (op
) == CONST
)
378 trueop
= avoid_constant_pool_reference (op
);
380 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
384 return simplify_unary_operation_1 (code
, mode
, op
);
387 /* Perform some simplifications we can do even if the operands
390 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
392 enum rtx_code reversed
;
398 /* (not (not X)) == X. */
399 if (GET_CODE (op
) == NOT
)
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op
)
405 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
406 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
407 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
408 XEXP (op
, 0), XEXP (op
, 1));
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op
) == PLUS
412 && XEXP (op
, 1) == constm1_rtx
)
413 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op
) == NEG
)
417 return plus_constant (XEXP (op
, 0), -1);
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op
) == XOR
421 && GET_CODE (XEXP (op
, 1)) == CONST_INT
422 && (temp
= simplify_unary_operation (NOT
, mode
,
423 XEXP (op
, 1), mode
)) != 0)
424 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op
) == PLUS
428 && GET_CODE (XEXP (op
, 1)) == CONST_INT
429 && mode_signbit_p (mode
, XEXP (op
, 1))
430 && (temp
= simplify_unary_operation (NOT
, mode
,
431 XEXP (op
, 1), mode
)) != 0)
432 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
440 if (GET_CODE (op
) == ASHIFT
441 && XEXP (op
, 0) == const1_rtx
)
443 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
444 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
451 if (STORE_FLAG_VALUE
== -1
452 && GET_CODE (op
) == ASHIFTRT
453 && GET_CODE (XEXP (op
, 1)) == CONST_INT
454 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
455 return simplify_gen_relational (GE
, mode
, VOIDmode
,
456 XEXP (op
, 0), const0_rtx
);
459 if (GET_CODE (op
) == SUBREG
460 && subreg_lowpart_p (op
)
461 && (GET_MODE_SIZE (GET_MODE (op
))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
463 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
464 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
466 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
469 x
= gen_rtx_ROTATE (inner_mode
,
470 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
472 XEXP (SUBREG_REG (op
), 1));
473 return rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
481 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
483 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
484 enum machine_mode op_mode
;
486 op_mode
= GET_MODE (in1
);
487 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
489 op_mode
= GET_MODE (in2
);
490 if (op_mode
== VOIDmode
)
492 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
494 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
497 in2
= in1
; in1
= tem
;
500 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op
) == NEG
)
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op
) == PLUS
512 && XEXP (op
, 1) == const1_rtx
)
513 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op
) == NOT
)
517 return plus_constant (XEXP (op
, 0), 1);
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op
) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode
)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
527 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
529 if (GET_CODE (op
) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode
)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
535 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
537 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
539 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
544 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op
) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
552 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
553 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
559 if (GET_CODE (op
) == ASHIFT
)
561 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
563 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op
) == ASHIFTRT
569 && GET_CODE (XEXP (op
, 1)) == CONST_INT
570 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
571 return simplify_gen_binary (LSHIFTRT
, mode
,
572 XEXP (op
, 0), XEXP (op
, 1));
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op
) == LSHIFTRT
577 && GET_CODE (XEXP (op
, 1)) == CONST_INT
578 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
579 return simplify_gen_binary (ASHIFTRT
, mode
,
580 XEXP (op
, 0), XEXP (op
, 1));
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op
) == XOR
584 && XEXP (op
, 1) == const1_rtx
585 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
586 return plus_constant (XEXP (op
, 0), -1);
588 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
589 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
590 if (GET_CODE (op
) == LT
591 && XEXP (op
, 1) == const0_rtx
)
593 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
594 int isize
= GET_MODE_BITSIZE (inner
);
595 if (STORE_FLAG_VALUE
== 1)
597 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
598 GEN_INT (isize
- 1));
601 if (GET_MODE_BITSIZE (mode
) > isize
)
602 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
603 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
605 else if (STORE_FLAG_VALUE
== -1)
607 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
608 GEN_INT (isize
- 1));
611 if (GET_MODE_BITSIZE (mode
) > isize
)
612 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
613 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
619 /* We can't handle truncation to a partial integer mode here
620 because we don't know the real bitsize of the partial
622 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
625 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
626 if ((GET_CODE (op
) == SIGN_EXTEND
627 || GET_CODE (op
) == ZERO_EXTEND
)
628 && GET_MODE (XEXP (op
, 0)) == mode
)
631 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
632 (OP:SI foo:SI) if OP is NEG or ABS. */
633 if ((GET_CODE (op
) == ABS
634 || GET_CODE (op
) == NEG
)
635 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
636 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
637 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
638 return simplify_gen_unary (GET_CODE (op
), mode
,
639 XEXP (XEXP (op
, 0), 0), mode
);
641 /* (truncate:A (subreg:B (truncate:C X) 0)) is
643 if (GET_CODE (op
) == SUBREG
644 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
645 && subreg_lowpart_p (op
))
646 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
647 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
649 /* If we know that the value is already truncated, we can
650 replace the TRUNCATE with a SUBREG. Note that this is also
651 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
652 modes we just have to apply a different definition for
653 truncation. But don't do this for an (LSHIFTRT (MULT ...))
654 since this will cause problems with the umulXi3_highpart
656 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
657 GET_MODE_BITSIZE (GET_MODE (op
)))
658 ? (num_sign_bit_copies (op
, GET_MODE (op
))
659 >= (unsigned int) (GET_MODE_BITSIZE (mode
) + 1))
660 : truncated_to_mode (mode
, op
))
661 && ! (GET_CODE (op
) == LSHIFTRT
662 && GET_CODE (XEXP (op
, 0)) == MULT
))
663 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
665 /* A truncate of a comparison can be replaced with a subreg if
666 STORE_FLAG_VALUE permits. This is like the previous test,
667 but it works even if the comparison is done in a mode larger
668 than HOST_BITS_PER_WIDE_INT. */
669 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
671 && ((HOST_WIDE_INT
) STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
672 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
676 if (DECIMAL_FLOAT_MODE_P (mode
))
679 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
680 if (GET_CODE (op
) == FLOAT_EXTEND
681 && GET_MODE (XEXP (op
, 0)) == mode
)
684 /* (float_truncate:SF (float_truncate:DF foo:XF))
685 = (float_truncate:SF foo:XF).
686 This may eliminate double rounding, so it is unsafe.
688 (float_truncate:SF (float_extend:XF foo:DF))
689 = (float_truncate:SF foo:DF).
691 (float_truncate:DF (float_extend:XF foo:SF))
692 = (float_extend:SF foo:DF). */
693 if ((GET_CODE (op
) == FLOAT_TRUNCATE
694 && flag_unsafe_math_optimizations
)
695 || GET_CODE (op
) == FLOAT_EXTEND
)
696 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
698 > GET_MODE_SIZE (mode
)
699 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
703 /* (float_truncate (float x)) is (float x) */
704 if (GET_CODE (op
) == FLOAT
705 && (flag_unsafe_math_optimizations
706 || ((unsigned)significand_size (GET_MODE (op
))
707 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
708 - num_sign_bit_copies (XEXP (op
, 0),
709 GET_MODE (XEXP (op
, 0)))))))
710 return simplify_gen_unary (FLOAT
, mode
,
712 GET_MODE (XEXP (op
, 0)));
714 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
715 (OP:SF foo:SF) if OP is NEG or ABS. */
716 if ((GET_CODE (op
) == ABS
717 || GET_CODE (op
) == NEG
)
718 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
719 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
720 return simplify_gen_unary (GET_CODE (op
), mode
,
721 XEXP (XEXP (op
, 0), 0), mode
);
723 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
724 is (float_truncate:SF x). */
725 if (GET_CODE (op
) == SUBREG
726 && subreg_lowpart_p (op
)
727 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
728 return SUBREG_REG (op
);
732 if (DECIMAL_FLOAT_MODE_P (mode
))
735 /* (float_extend (float_extend x)) is (float_extend x)
737 (float_extend (float x)) is (float x) assuming that double
738 rounding can't happen.
740 if (GET_CODE (op
) == FLOAT_EXTEND
741 || (GET_CODE (op
) == FLOAT
742 && ((unsigned)significand_size (GET_MODE (op
))
743 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
744 - num_sign_bit_copies (XEXP (op
, 0),
745 GET_MODE (XEXP (op
, 0)))))))
746 return simplify_gen_unary (GET_CODE (op
), mode
,
748 GET_MODE (XEXP (op
, 0)));
753 /* (abs (neg <foo>)) -> (abs <foo>) */
754 if (GET_CODE (op
) == NEG
)
755 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
756 GET_MODE (XEXP (op
, 0)));
758 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
760 if (GET_MODE (op
) == VOIDmode
)
763 /* If operand is something known to be positive, ignore the ABS. */
764 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
765 || ((GET_MODE_BITSIZE (GET_MODE (op
))
766 <= HOST_BITS_PER_WIDE_INT
)
767 && ((nonzero_bits (op
, GET_MODE (op
))
769 << (GET_MODE_BITSIZE (GET_MODE (op
)) - 1)))
773 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
774 if (num_sign_bit_copies (op
, mode
) == GET_MODE_BITSIZE (mode
))
775 return gen_rtx_NEG (mode
, op
);
780 /* (ffs (*_extend <X>)) = (ffs <X>) */
781 if (GET_CODE (op
) == SIGN_EXTEND
782 || GET_CODE (op
) == ZERO_EXTEND
)
783 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
784 GET_MODE (XEXP (op
, 0)));
789 /* (pop* (zero_extend <X>)) = (pop* <X>) */
790 if (GET_CODE (op
) == ZERO_EXTEND
)
791 return simplify_gen_unary (code
, mode
, XEXP (op
, 0),
792 GET_MODE (XEXP (op
, 0)));
796 /* (float (sign_extend <X>)) = (float <X>). */
797 if (GET_CODE (op
) == SIGN_EXTEND
)
798 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
799 GET_MODE (XEXP (op
, 0)));
803 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
804 becomes just the MINUS if its mode is MODE. This allows
805 folding switch statements on machines using casesi (such as
807 if (GET_CODE (op
) == TRUNCATE
808 && GET_MODE (XEXP (op
, 0)) == mode
809 && GET_CODE (XEXP (op
, 0)) == MINUS
810 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
811 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
814 /* Check for a sign extension of a subreg of a promoted
815 variable, where the promotion is sign-extended, and the
816 target mode is the same as the variable's promotion. */
817 if (GET_CODE (op
) == SUBREG
818 && SUBREG_PROMOTED_VAR_P (op
)
819 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
820 && GET_MODE (XEXP (op
, 0)) == mode
)
823 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
824 if (! POINTERS_EXTEND_UNSIGNED
825 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
827 || (GET_CODE (op
) == SUBREG
828 && REG_P (SUBREG_REG (op
))
829 && REG_POINTER (SUBREG_REG (op
))
830 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
831 return convert_memory_address (Pmode
, op
);
836 /* Check for a zero extension of a subreg of a promoted
837 variable, where the promotion is zero-extended, and the
838 target mode is the same as the variable's promotion. */
839 if (GET_CODE (op
) == SUBREG
840 && SUBREG_PROMOTED_VAR_P (op
)
841 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
842 && GET_MODE (XEXP (op
, 0)) == mode
)
845 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
846 if (POINTERS_EXTEND_UNSIGNED
> 0
847 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
849 || (GET_CODE (op
) == SUBREG
850 && REG_P (SUBREG_REG (op
))
851 && REG_POINTER (SUBREG_REG (op
))
852 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
853 return convert_memory_address (Pmode
, op
);
864 /* Try to compute the value of a unary operation CODE whose output mode is to
865 be MODE with input operand OP whose mode was originally OP_MODE.
866 Return zero if the value cannot be computed. */
868 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
869 rtx op
, enum machine_mode op_mode
)
871 unsigned int width
= GET_MODE_BITSIZE (mode
);
873 if (code
== VEC_DUPLICATE
)
875 gcc_assert (VECTOR_MODE_P (mode
));
876 if (GET_MODE (op
) != VOIDmode
)
878 if (!VECTOR_MODE_P (GET_MODE (op
)))
879 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
881 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
884 if (GET_CODE (op
) == CONST_INT
|| GET_CODE (op
) == CONST_DOUBLE
885 || GET_CODE (op
) == CONST_VECTOR
)
887 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
888 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
889 rtvec v
= rtvec_alloc (n_elts
);
892 if (GET_CODE (op
) != CONST_VECTOR
)
893 for (i
= 0; i
< n_elts
; i
++)
894 RTVEC_ELT (v
, i
) = op
;
897 enum machine_mode inmode
= GET_MODE (op
);
898 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
899 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
901 gcc_assert (in_n_elts
< n_elts
);
902 gcc_assert ((n_elts
% in_n_elts
) == 0);
903 for (i
= 0; i
< n_elts
; i
++)
904 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
906 return gen_rtx_CONST_VECTOR (mode
, v
);
910 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
912 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
913 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
914 enum machine_mode opmode
= GET_MODE (op
);
915 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
916 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
917 rtvec v
= rtvec_alloc (n_elts
);
920 gcc_assert (op_n_elts
== n_elts
);
921 for (i
= 0; i
< n_elts
; i
++)
923 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
924 CONST_VECTOR_ELT (op
, i
),
925 GET_MODE_INNER (opmode
));
928 RTVEC_ELT (v
, i
) = x
;
930 return gen_rtx_CONST_VECTOR (mode
, v
);
933 /* The order of these tests is critical so that, for example, we don't
934 check the wrong mode (input vs. output) for a conversion operation,
935 such as FIX. At some point, this should be simplified. */
937 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
938 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
940 HOST_WIDE_INT hv
, lv
;
943 if (GET_CODE (op
) == CONST_INT
)
944 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
946 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
948 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
949 d
= real_value_truncate (mode
, d
);
950 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
952 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
953 && (GET_CODE (op
) == CONST_DOUBLE
954 || GET_CODE (op
) == CONST_INT
))
956 HOST_WIDE_INT hv
, lv
;
959 if (GET_CODE (op
) == CONST_INT
)
960 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
962 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
964 if (op_mode
== VOIDmode
)
966 /* We don't know how to interpret negative-looking numbers in
967 this case, so don't try to fold those. */
971 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
974 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
976 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
977 d
= real_value_truncate (mode
, d
);
978 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
981 if (GET_CODE (op
) == CONST_INT
982 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
984 HOST_WIDE_INT arg0
= INTVAL (op
);
998 val
= (arg0
>= 0 ? arg0
: - arg0
);
1002 /* Don't use ffs here. Instead, get low order bit and then its
1003 number. If arg0 is zero, this will return 0, as desired. */
1004 arg0
&= GET_MODE_MASK (mode
);
1005 val
= exact_log2 (arg0
& (- arg0
)) + 1;
1009 arg0
&= GET_MODE_MASK (mode
);
1010 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1013 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
1017 arg0
&= GET_MODE_MASK (mode
);
1020 /* Even if the value at zero is undefined, we have to come
1021 up with some replacement. Seems good enough. */
1022 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1023 val
= GET_MODE_BITSIZE (mode
);
1026 val
= exact_log2 (arg0
& -arg0
);
1030 arg0
&= GET_MODE_MASK (mode
);
1033 val
++, arg0
&= arg0
- 1;
1037 arg0
&= GET_MODE_MASK (mode
);
1040 val
++, arg0
&= arg0
- 1;
1049 /* When zero-extending a CONST_INT, we need to know its
1051 gcc_assert (op_mode
!= VOIDmode
);
1052 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1054 /* If we were really extending the mode,
1055 we would have to distinguish between zero-extension
1056 and sign-extension. */
1057 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1060 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1061 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1067 if (op_mode
== VOIDmode
)
1069 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1071 /* If we were really extending the mode,
1072 we would have to distinguish between zero-extension
1073 and sign-extension. */
1074 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1077 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1080 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1082 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
1083 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1091 case FLOAT_TRUNCATE
:
1100 return gen_int_mode (val
, mode
);
1103 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1104 for a DImode operation on a CONST_INT. */
1105 else if (GET_MODE (op
) == VOIDmode
1106 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1107 && (GET_CODE (op
) == CONST_DOUBLE
1108 || GET_CODE (op
) == CONST_INT
))
1110 unsigned HOST_WIDE_INT l1
, lv
;
1111 HOST_WIDE_INT h1
, hv
;
1113 if (GET_CODE (op
) == CONST_DOUBLE
)
1114 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1116 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1126 neg_double (l1
, h1
, &lv
, &hv
);
1131 neg_double (l1
, h1
, &lv
, &hv
);
1143 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
1146 lv
= exact_log2 (l1
& -l1
) + 1;
1152 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
1153 - HOST_BITS_PER_WIDE_INT
;
1155 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
1156 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1157 lv
= GET_MODE_BITSIZE (mode
);
1163 lv
= exact_log2 (l1
& -l1
);
1165 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
1166 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1167 lv
= GET_MODE_BITSIZE (mode
);
1190 /* This is just a change-of-mode, so do nothing. */
1195 gcc_assert (op_mode
!= VOIDmode
);
1197 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1201 lv
= l1
& GET_MODE_MASK (op_mode
);
1205 if (op_mode
== VOIDmode
1206 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1210 lv
= l1
& GET_MODE_MASK (op_mode
);
1211 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
1212 && (lv
& ((HOST_WIDE_INT
) 1
1213 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
1214 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1216 hv
= HWI_SIGN_EXTEND (lv
);
1227 return immed_double_const (lv
, hv
, mode
);
1230 else if (GET_CODE (op
) == CONST_DOUBLE
1231 && SCALAR_FLOAT_MODE_P (mode
))
1233 REAL_VALUE_TYPE d
, t
;
1234 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1239 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1241 real_sqrt (&t
, mode
, &d
);
1245 d
= REAL_VALUE_ABS (d
);
1248 d
= REAL_VALUE_NEGATE (d
);
1250 case FLOAT_TRUNCATE
:
1251 d
= real_value_truncate (mode
, d
);
1254 /* All this does is change the mode. */
1257 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1264 real_to_target (tmp
, &d
, GET_MODE (op
));
1265 for (i
= 0; i
< 4; i
++)
1267 real_from_target (&d
, tmp
, mode
);
1273 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1276 else if (GET_CODE (op
) == CONST_DOUBLE
1277 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1278 && GET_MODE_CLASS (mode
) == MODE_INT
1279 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1281 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1282 operators are intentionally left unspecified (to ease implementation
1283 by target backends), for consistency, this routine implements the
1284 same semantics for constant folding as used by the middle-end. */
1286 /* This was formerly used only for non-IEEE float.
1287 eggert@twinsun.com says it is safe for IEEE also. */
1288 HOST_WIDE_INT xh
, xl
, th
, tl
;
1289 REAL_VALUE_TYPE x
, t
;
1290 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1294 if (REAL_VALUE_ISNAN (x
))
1297 /* Test against the signed upper bound. */
1298 if (width
> HOST_BITS_PER_WIDE_INT
)
1300 th
= ((unsigned HOST_WIDE_INT
) 1
1301 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1307 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1309 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1310 if (REAL_VALUES_LESS (t
, x
))
1317 /* Test against the signed lower bound. */
1318 if (width
> HOST_BITS_PER_WIDE_INT
)
1320 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1326 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1328 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1329 if (REAL_VALUES_LESS (x
, t
))
1335 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1339 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1342 /* Test against the unsigned upper bound. */
1343 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1348 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1350 th
= ((unsigned HOST_WIDE_INT
) 1
1351 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1357 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1359 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1360 if (REAL_VALUES_LESS (t
, x
))
1367 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1373 return immed_double_const (xl
, xh
, mode
);
1379 /* Subroutine of simplify_binary_operation to simplify a commutative,
1380 associative binary operation CODE with result mode MODE, operating
1381 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1382 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1383 canonicalization is possible. */
1386 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1391 /* Linearize the operator to the left. */
1392 if (GET_CODE (op1
) == code
)
1394 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1395 if (GET_CODE (op0
) == code
)
1397 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1398 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1401 /* "a op (b op c)" becomes "(b op c) op a". */
1402 if (! swap_commutative_operands_p (op1
, op0
))
1403 return simplify_gen_binary (code
, mode
, op1
, op0
);
1410 if (GET_CODE (op0
) == code
)
1412 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1413 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
1415 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1416 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1419 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1420 tem
= swap_commutative_operands_p (XEXP (op0
, 1), op1
)
1421 ? simplify_binary_operation (code
, mode
, op1
, XEXP (op0
, 1))
1422 : simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1424 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1426 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1427 tem
= swap_commutative_operands_p (XEXP (op0
, 0), op1
)
1428 ? simplify_binary_operation (code
, mode
, op1
, XEXP (op0
, 0))
1429 : simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
1431 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1438 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1439 and OP1. Return 0 if no simplification is possible.
1441 Don't use this for relational operations such as EQ or LT.
1442 Use simplify_relational_operation instead. */
1444 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1447 rtx trueop0
, trueop1
;
1450 /* Relational operations don't work here. We must know the mode
1451 of the operands in order to do the comparison correctly.
1452 Assuming a full word can give incorrect results.
1453 Consider comparing 128 with -128 in QImode. */
1454 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
1455 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
1457 /* Make sure the constant is second. */
1458 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
1459 && swap_commutative_operands_p (op0
, op1
))
1461 tem
= op0
, op0
= op1
, op1
= tem
;
1464 trueop0
= avoid_constant_pool_reference (op0
);
1465 trueop1
= avoid_constant_pool_reference (op1
);
1467 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
1470 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
1474 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
1475 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1477 rtx tem
, reversed
, opleft
, opright
;
1479 unsigned int width
= GET_MODE_BITSIZE (mode
);
1481 /* Even if we can't compute a constant result,
1482 there are some cases worth simplifying. */
1487 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1488 when x is NaN, infinite, or finite and nonzero. They aren't
1489 when x is -0 and the rounding mode is not towards -infinity,
1490 since (-0) + 0 is then 0. */
1491 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1494 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1495 transformations are safe even for IEEE. */
1496 if (GET_CODE (op0
) == NEG
)
1497 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1498 else if (GET_CODE (op1
) == NEG
)
1499 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1501 /* (~a) + 1 -> -a */
1502 if (INTEGRAL_MODE_P (mode
)
1503 && GET_CODE (op0
) == NOT
1504 && trueop1
== const1_rtx
)
1505 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1507 /* Handle both-operands-constant cases. We can only add
1508 CONST_INTs to constants since the sum of relocatable symbols
1509 can't be handled by most assemblers. Don't add CONST_INT
1510 to CONST_INT since overflow won't be computed properly if wider
1511 than HOST_BITS_PER_WIDE_INT. */
1513 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1514 && GET_CODE (op1
) == CONST_INT
)
1515 return plus_constant (op0
, INTVAL (op1
));
1516 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1517 && GET_CODE (op0
) == CONST_INT
)
1518 return plus_constant (op1
, INTVAL (op0
));
1520 /* See if this is something like X * C - X or vice versa or
1521 if the multiplication is written as a shift. If so, we can
1522 distribute and make a new multiply, shift, or maybe just
1523 have X (if C is 2 in the example above). But don't make
1524 something more expensive than we had before. */
1526 if (SCALAR_INT_MODE_P (mode
))
1528 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1529 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1530 rtx lhs
= op0
, rhs
= op1
;
1532 if (GET_CODE (lhs
) == NEG
)
1536 lhs
= XEXP (lhs
, 0);
1538 else if (GET_CODE (lhs
) == MULT
1539 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1541 coeff0l
= INTVAL (XEXP (lhs
, 1));
1542 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1543 lhs
= XEXP (lhs
, 0);
1545 else if (GET_CODE (lhs
) == ASHIFT
1546 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1547 && INTVAL (XEXP (lhs
, 1)) >= 0
1548 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1550 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1552 lhs
= XEXP (lhs
, 0);
1555 if (GET_CODE (rhs
) == NEG
)
1559 rhs
= XEXP (rhs
, 0);
1561 else if (GET_CODE (rhs
) == MULT
1562 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1564 coeff1l
= INTVAL (XEXP (rhs
, 1));
1565 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1566 rhs
= XEXP (rhs
, 0);
1568 else if (GET_CODE (rhs
) == ASHIFT
1569 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1570 && INTVAL (XEXP (rhs
, 1)) >= 0
1571 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1573 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1575 rhs
= XEXP (rhs
, 0);
1578 if (rtx_equal_p (lhs
, rhs
))
1580 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1582 unsigned HOST_WIDE_INT l
;
1585 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1586 coeff
= immed_double_const (l
, h
, mode
);
1588 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1589 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1594 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1595 if ((GET_CODE (op1
) == CONST_INT
1596 || GET_CODE (op1
) == CONST_DOUBLE
)
1597 && GET_CODE (op0
) == XOR
1598 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1599 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1600 && mode_signbit_p (mode
, op1
))
1601 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1602 simplify_gen_binary (XOR
, mode
, op1
,
1605 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1606 if (GET_CODE (op0
) == MULT
1607 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1611 in1
= XEXP (XEXP (op0
, 0), 0);
1612 in2
= XEXP (op0
, 1);
1613 return simplify_gen_binary (MINUS
, mode
, op1
,
1614 simplify_gen_binary (MULT
, mode
,
1618 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1619 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1621 if (COMPARISON_P (op0
)
1622 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1623 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1624 && (reversed
= reversed_comparison (op0
, mode
)))
1626 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1628 /* If one of the operands is a PLUS or a MINUS, see if we can
1629 simplify this by the associative law.
1630 Don't use the associative law for floating point.
1631 The inaccuracy makes it nonassociative,
1632 and subtle programs can break if operations are associated. */
1634 if (INTEGRAL_MODE_P (mode
)
1635 && (plus_minus_operand_p (op0
)
1636 || plus_minus_operand_p (op1
))
1637 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1640 /* Reassociate floating point addition only when the user
1641 specifies unsafe math optimizations. */
1642 if (FLOAT_MODE_P (mode
)
1643 && flag_unsafe_math_optimizations
)
1645 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1653 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1654 using cc0, in which case we want to leave it as a COMPARE
1655 so we can distinguish it from a register-register-copy.
1657 In IEEE floating point, x-0 is not the same as x. */
1659 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1660 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1661 && trueop1
== CONST0_RTX (mode
))
1665 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1666 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1667 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1668 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1670 rtx xop00
= XEXP (op0
, 0);
1671 rtx xop10
= XEXP (op1
, 0);
1674 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1676 if (REG_P (xop00
) && REG_P (xop10
)
1677 && GET_MODE (xop00
) == GET_MODE (xop10
)
1678 && REGNO (xop00
) == REGNO (xop10
)
1679 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1680 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1687 /* We can't assume x-x is 0 even with non-IEEE floating point,
1688 but since it is zero except in very strange circumstances, we
1689 will treat it as zero with -funsafe-math-optimizations. */
1690 if (rtx_equal_p (trueop0
, trueop1
)
1691 && ! side_effects_p (op0
)
1692 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1693 return CONST0_RTX (mode
);
1695 /* Change subtraction from zero into negation. (0 - x) is the
1696 same as -x when x is NaN, infinite, or finite and nonzero.
1697 But if the mode has signed zeros, and does not round towards
1698 -infinity, then 0 - 0 is 0, not -0. */
1699 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1700 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1702 /* (-1 - a) is ~a. */
1703 if (trueop0
== constm1_rtx
)
1704 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1706 /* Subtracting 0 has no effect unless the mode has signed zeros
1707 and supports rounding towards -infinity. In such a case,
1709 if (!(HONOR_SIGNED_ZEROS (mode
)
1710 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1711 && trueop1
== CONST0_RTX (mode
))
1714 /* See if this is something like X * C - X or vice versa or
1715 if the multiplication is written as a shift. If so, we can
1716 distribute and make a new multiply, shift, or maybe just
1717 have X (if C is 2 in the example above). But don't make
1718 something more expensive than we had before. */
1720 if (SCALAR_INT_MODE_P (mode
))
1722 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1723 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1724 rtx lhs
= op0
, rhs
= op1
;
1726 if (GET_CODE (lhs
) == NEG
)
1730 lhs
= XEXP (lhs
, 0);
1732 else if (GET_CODE (lhs
) == MULT
1733 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1735 coeff0l
= INTVAL (XEXP (lhs
, 1));
1736 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1737 lhs
= XEXP (lhs
, 0);
1739 else if (GET_CODE (lhs
) == ASHIFT
1740 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1741 && INTVAL (XEXP (lhs
, 1)) >= 0
1742 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1744 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1746 lhs
= XEXP (lhs
, 0);
1749 if (GET_CODE (rhs
) == NEG
)
1753 rhs
= XEXP (rhs
, 0);
1755 else if (GET_CODE (rhs
) == MULT
1756 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1758 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1759 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1760 rhs
= XEXP (rhs
, 0);
1762 else if (GET_CODE (rhs
) == ASHIFT
1763 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1764 && INTVAL (XEXP (rhs
, 1)) >= 0
1765 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1767 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1769 rhs
= XEXP (rhs
, 0);
1772 if (rtx_equal_p (lhs
, rhs
))
1774 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1776 unsigned HOST_WIDE_INT l
;
1779 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1780 coeff
= immed_double_const (l
, h
, mode
);
1782 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1783 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1788 /* (a - (-b)) -> (a + b). True even for IEEE. */
1789 if (GET_CODE (op1
) == NEG
)
1790 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1792 /* (-x - c) may be simplified as (-c - x). */
1793 if (GET_CODE (op0
) == NEG
1794 && (GET_CODE (op1
) == CONST_INT
1795 || GET_CODE (op1
) == CONST_DOUBLE
))
1797 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1799 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1802 /* Don't let a relocatable value get a negative coeff. */
1803 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1804 return simplify_gen_binary (PLUS
, mode
,
1806 neg_const_int (mode
, op1
));
1808 /* (x - (x & y)) -> (x & ~y) */
1809 if (GET_CODE (op1
) == AND
)
1811 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1813 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1814 GET_MODE (XEXP (op1
, 1)));
1815 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1817 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1819 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1820 GET_MODE (XEXP (op1
, 0)));
1821 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1825 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1826 by reversing the comparison code if valid. */
1827 if (STORE_FLAG_VALUE
== 1
1828 && trueop0
== const1_rtx
1829 && COMPARISON_P (op1
)
1830 && (reversed
= reversed_comparison (op1
, mode
)))
1833 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1834 if (GET_CODE (op1
) == MULT
1835 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1839 in1
= XEXP (XEXP (op1
, 0), 0);
1840 in2
= XEXP (op1
, 1);
1841 return simplify_gen_binary (PLUS
, mode
,
1842 simplify_gen_binary (MULT
, mode
,
1847 /* Canonicalize (minus (neg A) (mult B C)) to
1848 (minus (mult (neg B) C) A). */
1849 if (GET_CODE (op1
) == MULT
1850 && GET_CODE (op0
) == NEG
)
1854 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1855 in2
= XEXP (op1
, 1);
1856 return simplify_gen_binary (MINUS
, mode
,
1857 simplify_gen_binary (MULT
, mode
,
1862 /* If one of the operands is a PLUS or a MINUS, see if we can
1863 simplify this by the associative law. This will, for example,
1864 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1865 Don't use the associative law for floating point.
1866 The inaccuracy makes it nonassociative,
1867 and subtle programs can break if operations are associated. */
1869 if (INTEGRAL_MODE_P (mode
)
1870 && (plus_minus_operand_p (op0
)
1871 || plus_minus_operand_p (op1
))
1872 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1877 if (trueop1
== constm1_rtx
)
1878 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1880 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1881 x is NaN, since x * 0 is then also NaN. Nor is it valid
1882 when the mode has signed zeros, since multiplying a negative
1883 number by 0 will give -0, not 0. */
1884 if (!HONOR_NANS (mode
)
1885 && !HONOR_SIGNED_ZEROS (mode
)
1886 && trueop1
== CONST0_RTX (mode
)
1887 && ! side_effects_p (op0
))
1890 /* In IEEE floating point, x*1 is not equivalent to x for
1892 if (!HONOR_SNANS (mode
)
1893 && trueop1
== CONST1_RTX (mode
))
1896 /* Convert multiply by constant power of two into shift unless
1897 we are still generating RTL. This test is a kludge. */
1898 if (GET_CODE (trueop1
) == CONST_INT
1899 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1900 /* If the mode is larger than the host word size, and the
1901 uppermost bit is set, then this isn't a power of two due
1902 to implicit sign extension. */
1903 && (width
<= HOST_BITS_PER_WIDE_INT
1904 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1905 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1907 /* Likewise for multipliers wider than a word. */
1908 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1909 && (GET_MODE (trueop1
) == VOIDmode
1910 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1911 && GET_MODE (op0
) == mode
1912 && CONST_DOUBLE_LOW (trueop1
) == 0
1913 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
1914 return simplify_gen_binary (ASHIFT
, mode
, op0
,
1915 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
1917 /* x*2 is x+x and x*(-1) is -x */
1918 if (GET_CODE (trueop1
) == CONST_DOUBLE
1919 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
1920 && GET_MODE (op0
) == mode
)
1923 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1925 if (REAL_VALUES_EQUAL (d
, dconst2
))
1926 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1928 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1929 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1932 /* Reassociate multiplication, but for floating point MULTs
1933 only when the user specifies unsafe math optimizations. */
1934 if (! FLOAT_MODE_P (mode
)
1935 || flag_unsafe_math_optimizations
)
1937 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1944 if (trueop1
== const0_rtx
)
1946 if (GET_CODE (trueop1
) == CONST_INT
1947 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1948 == GET_MODE_MASK (mode
)))
1950 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1952 /* A | (~A) -> -1 */
1953 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1954 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1955 && ! side_effects_p (op0
)
1956 && SCALAR_INT_MODE_P (mode
))
1959 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1960 if (GET_CODE (op1
) == CONST_INT
1961 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1962 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
1965 /* Convert (A & B) | A to A. */
1966 if (GET_CODE (op0
) == AND
1967 && (rtx_equal_p (XEXP (op0
, 0), op1
)
1968 || rtx_equal_p (XEXP (op0
, 1), op1
))
1969 && ! side_effects_p (XEXP (op0
, 0))
1970 && ! side_effects_p (XEXP (op0
, 1)))
1973 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1974 mode size to (rotate A CX). */
1976 if (GET_CODE (op1
) == ASHIFT
1977 || GET_CODE (op1
) == SUBREG
)
1988 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
1989 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
1990 && GET_CODE (XEXP (opleft
, 1)) == CONST_INT
1991 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
1992 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
1993 == GET_MODE_BITSIZE (mode
)))
1994 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
1996 /* Same, but for ashift that has been "simplified" to a wider mode
1997 by simplify_shift_const. */
1999 if (GET_CODE (opleft
) == SUBREG
2000 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2001 && GET_CODE (opright
) == LSHIFTRT
2002 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2003 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2004 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2005 && (GET_MODE_SIZE (GET_MODE (opleft
))
2006 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2007 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2008 SUBREG_REG (XEXP (opright
, 0)))
2009 && GET_CODE (XEXP (SUBREG_REG (opleft
), 1)) == CONST_INT
2010 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2011 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2012 == GET_MODE_BITSIZE (mode
)))
2013 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2014 XEXP (SUBREG_REG (opleft
), 1));
2016 /* If we have (ior (and (X C1) C2)), simplify this by making
2017 C1 as small as possible if C1 actually changes. */
2018 if (GET_CODE (op1
) == CONST_INT
2019 && (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2020 || INTVAL (op1
) > 0)
2021 && GET_CODE (op0
) == AND
2022 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2023 && GET_CODE (op1
) == CONST_INT
2024 && (INTVAL (XEXP (op0
, 1)) & INTVAL (op1
)) != 0)
2025 return simplify_gen_binary (IOR
, mode
,
2027 (AND
, mode
, XEXP (op0
, 0),
2028 GEN_INT (INTVAL (XEXP (op0
, 1))
2032 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2033 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2034 the PLUS does not affect any of the bits in OP1: then we can do
2035 the IOR as a PLUS and we can associate. This is valid if OP1
2036 can be safely shifted left C bits. */
2037 if (GET_CODE (trueop1
) == CONST_INT
&& GET_CODE (op0
) == ASHIFTRT
2038 && GET_CODE (XEXP (op0
, 0)) == PLUS
2039 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == CONST_INT
2040 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2041 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2043 int count
= INTVAL (XEXP (op0
, 1));
2044 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2046 if (mask
>> count
== INTVAL (trueop1
)
2047 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2048 return simplify_gen_binary (ASHIFTRT
, mode
,
2049 plus_constant (XEXP (op0
, 0), mask
),
2053 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2059 if (trueop1
== const0_rtx
)
2061 if (GET_CODE (trueop1
) == CONST_INT
2062 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2063 == GET_MODE_MASK (mode
)))
2064 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2065 if (rtx_equal_p (trueop0
, trueop1
)
2066 && ! side_effects_p (op0
)
2067 && GET_MODE_CLASS (mode
) != MODE_CC
)
2068 return CONST0_RTX (mode
);
2070 /* Canonicalize XOR of the most significant bit to PLUS. */
2071 if ((GET_CODE (op1
) == CONST_INT
2072 || GET_CODE (op1
) == CONST_DOUBLE
)
2073 && mode_signbit_p (mode
, op1
))
2074 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
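
      /* e.g. (xor (and X (const_int 0xf0)) (and Y (const_int 0x0f))) has
         disjoint nonzero bits, so the XOR behaves exactly like an IOR.  */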
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
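
      /* e.g. (and:SI (sign_extend:SI (reg:QI X)) (const_int 0x7f)) becomes
         (zero_extend:SI (and:QI (reg:QI X) (const_int 0x7f))), since 0x7f
         has no bits outside QImode.  */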
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);
      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
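
      /* e.g. (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3));
         this is only valid for the unsigned division code.  */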
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
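
      /* Under -funsafe-math-optimizations an FP division such as x / 2.0
         is rewritten above as x * 0.5.  For divisors whose reciprocal is
         not exactly representable the product can differ from the true
         quotient by a rounding error, hence the guard.  */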
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
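
      /* e.g. (umod X (const_int 8)) becomes (and X (const_int 7)); again
         this is only valid for the unsigned modulus.  */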
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Fall through....  */

    case ASHIFT:
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      break;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
2482 /* ??? There are simplifications that can be done. */
2486 if (!VECTOR_MODE_P (mode
))
2488 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2489 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2490 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2491 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2492 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2494 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2495 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2500 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2501 gcc_assert (GET_MODE_INNER (mode
)
2502 == GET_MODE_INNER (GET_MODE (trueop0
)));
2503 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2505 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2507 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2508 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2509 rtvec v
= rtvec_alloc (n_elts
);
2512 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2513 for (i
= 0; i
< n_elts
; i
++)
2515 rtx x
= XVECEXP (trueop1
, 0, i
);
2517 gcc_assert (GET_CODE (x
) == CONST_INT
);
2518 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2522 return gen_rtx_CONST_VECTOR (mode
, v
);
2526 if (XVECLEN (trueop1
, 0) == 1
2527 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2528 && GET_CODE (trueop0
) == VEC_CONCAT
)
2531 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2533 /* Try to find the element in the VEC_CONCAT. */
2534 while (GET_MODE (vec
) != mode
2535 && GET_CODE (vec
) == VEC_CONCAT
)
2537 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2538 if (offset
< vec_size
)
2539 vec
= XEXP (vec
, 0);
2543 vec
= XEXP (vec
, 1);
2545 vec
= avoid_constant_pool_reference (vec
);
2548 if (GET_MODE (vec
) == mode
)
2555 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2556 ? GET_MODE (trueop0
)
2557 : GET_MODE_INNER (mode
));
2558 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2559 ? GET_MODE (trueop1
)
2560 : GET_MODE_INNER (mode
));
2562 gcc_assert (VECTOR_MODE_P (mode
));
2563 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2564 == GET_MODE_SIZE (mode
));
2566 if (VECTOR_MODE_P (op0_mode
))
2567 gcc_assert (GET_MODE_INNER (mode
)
2568 == GET_MODE_INNER (op0_mode
));
2570 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2572 if (VECTOR_MODE_P (op1_mode
))
2573 gcc_assert (GET_MODE_INNER (mode
)
2574 == GET_MODE_INNER (op1_mode
));
2576 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2578 if ((GET_CODE (trueop0
) == CONST_VECTOR
2579 || GET_CODE (trueop0
) == CONST_INT
2580 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2581 && (GET_CODE (trueop1
) == CONST_VECTOR
2582 || GET_CODE (trueop1
) == CONST_INT
2583 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2585 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2586 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2587 rtvec v
= rtvec_alloc (n_elts
);
2589 unsigned in_n_elts
= 1;
2591 if (VECTOR_MODE_P (op0_mode
))
2592 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2593 for (i
= 0; i
< n_elts
; i
++)
2597 if (!VECTOR_MODE_P (op0_mode
))
2598 RTVEC_ELT (v
, i
) = trueop0
;
2600 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2604 if (!VECTOR_MODE_P (op1_mode
))
2605 RTVEC_ELT (v
, i
) = trueop1
;
2607 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2612 return gen_rtx_CONST_VECTOR (mode
, v
);
2625 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2628 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2630 unsigned int width
= GET_MODE_BITSIZE (mode
);
2632 if (VECTOR_MODE_P (mode
)
2633 && code
!= VEC_CONCAT
2634 && GET_CODE (op0
) == CONST_VECTOR
2635 && GET_CODE (op1
) == CONST_VECTOR
)
2637 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2638 enum machine_mode op0mode
= GET_MODE (op0
);
2639 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2640 enum machine_mode op1mode
= GET_MODE (op1
);
2641 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2642 rtvec v
= rtvec_alloc (n_elts
);
2645 gcc_assert (op0_n_elts
== n_elts
);
2646 gcc_assert (op1_n_elts
== n_elts
);
2647 for (i
= 0; i
< n_elts
; i
++)
2649 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2650 CONST_VECTOR_ELT (op0
, i
),
2651 CONST_VECTOR_ELT (op1
, i
));
2654 RTVEC_ELT (v
, i
) = x
;
2657 return gen_rtx_CONST_VECTOR (mode
, v
);
2660 if (VECTOR_MODE_P (mode
)
2661 && code
== VEC_CONCAT
2662 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2664 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2665 rtvec v
= rtvec_alloc (n_elts
);
2667 gcc_assert (n_elts
>= 2);
2670 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2671 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2673 RTVEC_ELT (v
, 0) = op0
;
2674 RTVEC_ELT (v
, 1) = op1
;
2678 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2679 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2682 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2683 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2684 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2686 for (i
= 0; i
< op0_n_elts
; ++i
)
2687 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2688 for (i
= 0; i
< op1_n_elts
; ++i
)
2689 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2692 return gen_rtx_CONST_VECTOR (mode
, v
);
2695 if (SCALAR_FLOAT_MODE_P (mode
)
2696 && GET_CODE (op0
) == CONST_DOUBLE
2697 && GET_CODE (op1
) == CONST_DOUBLE
2698 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2709 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2711 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2713 for (i
= 0; i
< 4; i
++)
2730 real_from_target (&r
, tmp0
, mode
);
2731 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2735 REAL_VALUE_TYPE f0
, f1
, value
, result
;
2738 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
2739 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
2740 real_convert (&f0
, mode
, &f0
);
2741 real_convert (&f1
, mode
, &f1
);
2743 if (HONOR_SNANS (mode
)
2744 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
2748 && REAL_VALUES_EQUAL (f1
, dconst0
)
2749 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
2752 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2753 && flag_trapping_math
2754 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
2756 int s0
= REAL_VALUE_NEGATIVE (f0
);
2757 int s1
= REAL_VALUE_NEGATIVE (f1
);
2762 /* Inf + -Inf = NaN plus exception. */
2767 /* Inf - Inf = NaN plus exception. */
2772 /* Inf / Inf = NaN plus exception. */
2779 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2780 && flag_trapping_math
2781 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
2782 || (REAL_VALUE_ISINF (f1
)
2783 && REAL_VALUES_EQUAL (f0
, dconst0
))))
2784 /* Inf * 0 = NaN plus exception. */
2787 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
2789 real_convert (&result
, mode
, &value
);
2791 /* Don't constant fold this floating point operation if
2792 the result has overflowed and flag_trapping_math. */
2794 if (flag_trapping_math
2795 && MODE_HAS_INFINITIES (mode
)
2796 && REAL_VALUE_ISINF (result
)
2797 && !REAL_VALUE_ISINF (f0
)
2798 && !REAL_VALUE_ISINF (f1
))
2799 /* Overflow plus exception. */
2802 /* Don't constant fold this floating point operation if the
2803 result may dependent upon the run-time rounding mode and
2804 flag_rounding_math is set, or if GCC's software emulation
2805 is unable to accurately represent the result. */
2807 if ((flag_rounding_math
2808 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
2809 && !flag_unsafe_math_optimizations
))
2810 && (inexact
|| !real_identical (&result
, &value
)))
2813 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
2817 /* We can fold some multi-word operations. */
2818 if (GET_MODE_CLASS (mode
) == MODE_INT
2819 && width
== HOST_BITS_PER_WIDE_INT
* 2
2820 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2821 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2823 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2824 HOST_WIDE_INT h1
, h2
, hv
, ht
;
2826 if (GET_CODE (op0
) == CONST_DOUBLE
)
2827 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
2829 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
2831 if (GET_CODE (op1
) == CONST_DOUBLE
)
2832 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
2834 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
2839 /* A - B == A + (-B). */
2840 neg_double (l2
, h2
, &lv
, &hv
);
2843 /* Fall through.... */
2846 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2850 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2854 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2855 &lv
, &hv
, <
, &ht
))
2860 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2861 <
, &ht
, &lv
, &hv
))
2866 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2867 &lv
, &hv
, <
, &ht
))
2872 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2873 <
, &ht
, &lv
, &hv
))
2878 lv
= l1
& l2
, hv
= h1
& h2
;
2882 lv
= l1
| l2
, hv
= h1
| h2
;
2886 lv
= l1
^ l2
, hv
= h1
^ h2
;
2892 && ((unsigned HOST_WIDE_INT
) l1
2893 < (unsigned HOST_WIDE_INT
) l2
)))
2902 && ((unsigned HOST_WIDE_INT
) l1
2903 > (unsigned HOST_WIDE_INT
) l2
)))
2910 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
2912 && ((unsigned HOST_WIDE_INT
) l1
2913 < (unsigned HOST_WIDE_INT
) l2
)))
2920 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
2922 && ((unsigned HOST_WIDE_INT
) l1
2923 > (unsigned HOST_WIDE_INT
) l2
)))
2929 case LSHIFTRT
: case ASHIFTRT
:
2931 case ROTATE
: case ROTATERT
:
2932 if (SHIFT_COUNT_TRUNCATED
)
2933 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
2935 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
2938 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
2939 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
2941 else if (code
== ASHIFT
)
2942 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
2943 else if (code
== ROTATE
)
2944 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2945 else /* code == ROTATERT */
2946 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2953 return immed_double_const (lv
, hv
, mode
);
2956 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
2957 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
2959 /* Get the integer argument values in two forms:
2960 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2962 arg0
= INTVAL (op0
);
2963 arg1
= INTVAL (op1
);
2965 if (width
< HOST_BITS_PER_WIDE_INT
)
2967 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2968 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2971 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2972 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2975 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2976 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2984 /* Compute the value of the arithmetic. */
2989 val
= arg0s
+ arg1s
;
2993 val
= arg0s
- arg1s
;
2997 val
= arg0s
* arg1s
;
3002 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3005 val
= arg0s
/ arg1s
;
3010 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3013 val
= arg0s
% arg1s
;
3018 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3021 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3026 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3029 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3047 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3048 the value is in range. We can't return any old value for
3049 out-of-range arguments because either the middle-end (via
3050 shift_truncation_mask) or the back-end might be relying on
3051 target-specific knowledge. Nor can we rely on
3052 shift_truncation_mask, since the shift might not be part of an
3053 ashlM3, lshrM3 or ashrM3 instruction. */
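
	  /* For instance, with SHIFT_COUNT_TRUNCATED set on a 32-bit mode,
	     a constant shift count of 33 is reduced to 1 below; without it,
	     an out-of-range count makes us give up rather than guess.  */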
3054 if (SHIFT_COUNT_TRUNCATED
)
3055 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3056 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3059 val
= (code
== ASHIFT
3060 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3061 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3063 /* Sign-extend the result for arithmetic right shifts. */
3064 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3065 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3073 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3074 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3082 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3083 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3087 /* Do nothing here. */
3091 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3095 val
= ((unsigned HOST_WIDE_INT
) arg0
3096 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3100 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3104 val
= ((unsigned HOST_WIDE_INT
) arg0
3105 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3112 /* ??? There are simplifications that can be done. */
3119 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
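
/* For example, (plus (plus (reg A) (const_int 4)) (minus (reg B) (reg A)))
   is first flattened into the operand list {A, +4, B, -A}; the A and -A
   entries cancel in the pairwise simplification loop below, and the result
   is rebuilt as (plus (reg B) (const_int 4)).  */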
3134 struct simplify_plus_minus_op_data
3142 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
3144 const struct simplify_plus_minus_op_data
*d1
= p1
;
3145 const struct simplify_plus_minus_op_data
*d2
= p2
;
3148 result
= (commutative_operand_precedence (d2
->op
)
3149 - commutative_operand_precedence (d1
->op
));
3152 return d1
->ix
- d2
->ix
;
3156 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3159 struct simplify_plus_minus_op_data ops
[8];
3161 int n_ops
= 2, input_ops
= 2;
3162 int first
, changed
, canonicalized
= 0;
3165 memset (ops
, 0, sizeof ops
);
3167 /* Set up the two operands and then expand them until nothing has been
3168 changed. If we run out of room in our array, give up; this should
3169 almost never happen. */
3174 ops
[1].neg
= (code
== MINUS
);
3180 for (i
= 0; i
< n_ops
; i
++)
3182 rtx this_op
= ops
[i
].op
;
3183 int this_neg
= ops
[i
].neg
;
3184 enum rtx_code this_code
= GET_CODE (this_op
);
3193 ops
[n_ops
].op
= XEXP (this_op
, 1);
3194 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3197 ops
[i
].op
= XEXP (this_op
, 0);
3200 canonicalized
|= this_neg
;
3204 ops
[i
].op
= XEXP (this_op
, 0);
3205 ops
[i
].neg
= ! this_neg
;
3212 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3213 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3214 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3216 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3217 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3218 ops
[n_ops
].neg
= this_neg
;
3226 /* ~a -> (-a - 1) */
3229 ops
[n_ops
].op
= constm1_rtx
;
3230 ops
[n_ops
++].neg
= this_neg
;
3231 ops
[i
].op
= XEXP (this_op
, 0);
3232 ops
[i
].neg
= !this_neg
;
3241 ops
[i
].op
= neg_const_int (mode
, this_op
);
3255 gcc_assert (n_ops
>= 2);
3258 int n_constants
= 0;
3260 for (i
= 0; i
< n_ops
; i
++)
3261 if (GET_CODE (ops
[i
].op
) == CONST_INT
)
3264 if (n_constants
<= 1)
3268 /* If we only have two operands, we can avoid the loops. */
3271 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3274 /* Get the two operands. Be careful with the order, especially for
3275 the cases where code == MINUS. */
3276 if (ops
[0].neg
&& ops
[1].neg
)
3278 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3281 else if (ops
[0].neg
)
3292 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3295 /* Now simplify each pair of operands until nothing changes. The first
3296 time through just simplify constants against each other. */
3303 for (i
= 0; i
< n_ops
- 1; i
++)
3304 for (j
= i
+ 1; j
< n_ops
; j
++)
3306 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
3307 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
3309 if (lhs
!= 0 && rhs
!= 0
3310 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
3312 enum rtx_code ncode
= PLUS
;
3318 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3320 else if (swap_commutative_operands_p (lhs
, rhs
))
3321 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3323 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3324 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3326 rtx tem_lhs
, tem_rhs
;
3328 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3329 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3330 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3332 if (tem
&& !CONSTANT_P (tem
))
3333 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3336 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3338 /* Reject "simplifications" that just wrap the two
3339 arguments in a CONST. Failure to do so can result
3340 in infinite recursion with simplify_binary_operation
3341 when it calls us to simplify CONST operations. */
3343 && ! (GET_CODE (tem
) == CONST
3344 && GET_CODE (XEXP (tem
, 0)) == ncode
3345 && XEXP (XEXP (tem
, 0), 0) == lhs
3346 && XEXP (XEXP (tem
, 0), 1) == rhs
)
3347 /* Don't allow -x + -1 -> ~x simplifications in the
3348 first pass. This allows us the chance to combine
3349 the -1 with other constants. */
3351 && GET_CODE (tem
) == NOT
3352 && XEXP (tem
, 0) == rhs
))
3355 if (GET_CODE (tem
) == NEG
)
3356 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3357 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3358 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3362 ops
[j
].op
= NULL_RTX
;
3372 /* Pack all the operands to the lower-numbered entries. */
3373 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3377 /* Stabilize sort. */
3383 /* Sort the operations based on swap_commutative_operands_p. */
3384 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
3386 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3388 && GET_CODE (ops
[1].op
) == CONST_INT
3389 && CONSTANT_P (ops
[0].op
)
3391 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3393 /* We suppressed creation of trivial CONST expressions in the
3394 combination loop to avoid recursion. Create one manually now.
3395 The combination loop should have ensured that there is exactly
3396 one CONST_INT, and the sort will have ensured that it is last
3397 in the array and that any other constant will be next-to-last. */
3400 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3401 && CONSTANT_P (ops
[n_ops
- 2].op
))
3403 rtx value
= ops
[n_ops
- 1].op
;
3404 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3405 value
= neg_const_int (mode
, value
);
3406 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3410 /* Put a non-negated operand first, if possible. */
3412 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3415 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3424 /* Now make the result by performing the requested operations. */
3426 for (i
= 1; i
< n_ops
; i
++)
3427 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3428 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
3445 /* Like simplify_binary_operation except used for relational operators.
3446 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3447 not also be VOIDmode.
3449 CMP_MODE specifies in which mode the comparison is done in, so it is
3450 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3451 the operands or, if both are VOIDmode, the operands are compared in
3452 "infinite precision". */
3454 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3455 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3457 rtx tem
, trueop0
, trueop1
;
3459 if (cmp_mode
== VOIDmode
)
3460 cmp_mode
= GET_MODE (op0
);
3461 if (cmp_mode
== VOIDmode
)
3462 cmp_mode
= GET_MODE (op1
);
3464 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3467 if (SCALAR_FLOAT_MODE_P (mode
))
3469 if (tem
== const0_rtx
)
3470 return CONST0_RTX (mode
);
3471 #ifdef FLOAT_STORE_FLAG_VALUE
3473 REAL_VALUE_TYPE val
;
3474 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3475 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3481 if (VECTOR_MODE_P (mode
))
3483 if (tem
== const0_rtx
)
3484 return CONST0_RTX (mode
);
3485 #ifdef VECTOR_STORE_FLAG_VALUE
3490 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3491 if (val
== NULL_RTX
)
3493 if (val
== const1_rtx
)
3494 return CONST1_RTX (mode
);
3496 units
= GET_MODE_NUNITS (mode
);
3497 v
= rtvec_alloc (units
);
3498 for (i
= 0; i
< units
; i
++)
3499 RTVEC_ELT (v
, i
) = val
;
3500 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3510 /* For the following tests, ensure const0_rtx is op1. */
3511 if (swap_commutative_operands_p (op0
, op1
)
3512 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3513 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3515 /* If op0 is a compare, extract the comparison arguments from it. */
3516 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3517 return simplify_relational_operation (code
, mode
, VOIDmode
,
3518 XEXP (op0
, 0), XEXP (op0
, 1));
3520 if (mode
== VOIDmode
3521 || GET_MODE_CLASS (cmp_mode
) == MODE_CC
3525 trueop0
= avoid_constant_pool_reference (op0
);
3526 trueop1
= avoid_constant_pool_reference (op1
);
3527 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3531 /* This part of simplify_relational_operation is only used when CMP_MODE
3532 is not in class MODE_CC (i.e. it is a real comparison).
3534 MODE is the mode of the result, while CMP_MODE specifies in which
3535 mode the comparison is done in, so it is the mode of the operands. */
3538 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3539 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3541 enum rtx_code op0code
= GET_CODE (op0
);
3543 if (GET_CODE (op1
) == CONST_INT
)
3545 if (INTVAL (op1
) == 0 && COMPARISON_P (op0
))
3547 /* If op0 is a comparison, extract the comparison arguments
3551 if (GET_MODE (op0
) == mode
)
3552 return simplify_rtx (op0
);
3554 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3555 XEXP (op0
, 0), XEXP (op0
, 1));
3557 else if (code
== EQ
)
3559 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3560 if (new_code
!= UNKNOWN
)
3561 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3562 XEXP (op0
, 0), XEXP (op0
, 1));
3567 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3568 if ((code
== EQ
|| code
== NE
)
3569 && (op0code
== PLUS
|| op0code
== MINUS
)
3571 && CONSTANT_P (XEXP (op0
, 1))
3572 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3574 rtx x
= XEXP (op0
, 0);
3575 rtx c
= XEXP (op0
, 1);
3577 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3579 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3582 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3583 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3585 && op1
== const0_rtx
3586 && GET_MODE_CLASS (mode
) == MODE_INT
3587 && cmp_mode
!= VOIDmode
3588 /* ??? Work-around BImode bugs in the ia64 backend. */
3590 && cmp_mode
!= BImode
3591 && nonzero_bits (op0
, cmp_mode
) == 1
3592 && STORE_FLAG_VALUE
== 1)
3593 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3594 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3595 : lowpart_subreg (mode
, op0
, cmp_mode
);
3597 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3598 if ((code
== EQ
|| code
== NE
)
3599 && op1
== const0_rtx
3601 return simplify_gen_relational (code
, mode
, cmp_mode
,
3602 XEXP (op0
, 0), XEXP (op0
, 1));
3604 /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */
3605 if ((code
== EQ
|| code
== NE
)
3607 && rtx_equal_p (XEXP (op0
, 0), op1
)
3608 && !side_effects_p (XEXP (op0
, 1)))
3609 return simplify_gen_relational (code
, mode
, cmp_mode
, op1
, const0_rtx
);
3610 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */
3611 if ((code
== EQ
|| code
== NE
)
3613 && rtx_equal_p (XEXP (op0
, 1), op1
)
3614 && !side_effects_p (XEXP (op0
, 0)))
3615 return simplify_gen_relational (code
, mode
, cmp_mode
, op1
, const0_rtx
);
3617 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3618 if ((code
== EQ
|| code
== NE
)
3620 && (GET_CODE (op1
) == CONST_INT
3621 || GET_CODE (op1
) == CONST_DOUBLE
)
3622 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3623 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3624 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3625 simplify_gen_binary (XOR
, cmp_mode
,
3626 XEXP (op0
, 1), op1
));
3631 /* Check if the given comparison (done in the given MODE) is actually a
3632 tautology or a contradiction.
3633 If no simplification is possible, this function returns zero.
3634 Otherwise, it returns either const_true_rtx or const0_rtx. */
3637 simplify_const_relational_operation (enum rtx_code code
,
3638 enum machine_mode mode
,
3641 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3646 gcc_assert (mode
!= VOIDmode
3647 || (GET_MODE (op0
) == VOIDmode
3648 && GET_MODE (op1
) == VOIDmode
));
3650 /* If op0 is a compare, extract the comparison arguments from it. */
3651 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3653 op1
= XEXP (op0
, 1);
3654 op0
= XEXP (op0
, 0);
3656 if (GET_MODE (op0
) != VOIDmode
)
3657 mode
= GET_MODE (op0
);
3658 else if (GET_MODE (op1
) != VOIDmode
)
3659 mode
= GET_MODE (op1
);
3664 /* We can't simplify MODE_CC values since we don't know what the
3665 actual comparison is. */
3666 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3669 /* Make sure the constant is second. */
3670 if (swap_commutative_operands_p (op0
, op1
))
3672 tem
= op0
, op0
= op1
, op1
= tem
;
3673 code
= swap_condition (code
);
3676 trueop0
= avoid_constant_pool_reference (op0
);
3677 trueop1
= avoid_constant_pool_reference (op1
);
3679 /* For integer comparisons of A and B maybe we can simplify A - B and can
3680 then simplify a comparison of that with zero. If A and B are both either
3681 a register or a CONST_INT, this can't help; testing for these cases will
3682 prevent infinite recursion here and speed things up.
3684 If CODE is an unsigned comparison, then we can never do this optimization,
3685 because it gives an incorrect result if the subtraction wraps around zero.
3686 ANSI C defines unsigned operations such that they never overflow, and
3687 thus such cases can not be ignored; but we cannot do it even for
3688 signed comparisons for languages such as Java, so test flag_wrapv. */
3690 if (!flag_wrapv
&& INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3691 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3692 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3693 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3694 /* We cannot do this for == or != if tem is a nonzero address. */
3695 && ((code
!= EQ
&& code
!= NE
) || ! nonzero_address_p (tem
))
3696 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
3697 return simplify_const_relational_operation (signed_condition (code
),
3698 mode
, tem
, const0_rtx
);
3700 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
3701 return const_true_rtx
;
3703 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
3706 /* For modes without NaNs, if the two operands are equal, we know the
3707 result except if they have side-effects. */
3708 if (! HONOR_NANS (GET_MODE (trueop0
))
3709 && rtx_equal_p (trueop0
, trueop1
)
3710 && ! side_effects_p (trueop0
))
3711 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3713 /* If the operands are floating-point constants, see if we can fold
3715 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3716 && GET_CODE (trueop1
) == CONST_DOUBLE
3717 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3719 REAL_VALUE_TYPE d0
, d1
;
3721 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3722 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3724 /* Comparisons are unordered iff at least one of the values is NaN. */
3725 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3735 return const_true_rtx
;
3748 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3749 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3750 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3753 /* Otherwise, see if the operands are both integers. */
3754 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3755 && (GET_CODE (trueop0
) == CONST_DOUBLE
3756 || GET_CODE (trueop0
) == CONST_INT
)
3757 && (GET_CODE (trueop1
) == CONST_DOUBLE
3758 || GET_CODE (trueop1
) == CONST_INT
))
3760 int width
= GET_MODE_BITSIZE (mode
);
3761 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3762 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3764 /* Get the two words comprising each integer constant. */
3765 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3767 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3768 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3772 l0u
= l0s
= INTVAL (trueop0
);
3773 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3776 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3778 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3779 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3783 l1u
= l1s
= INTVAL (trueop1
);
3784 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3787 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3788 we have to sign or zero-extend the values. */
3789 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
3791 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3792 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3794 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3795 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3797 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3798 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3800 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
3801 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
3803 equal
= (h0u
== h1u
&& l0u
== l1u
);
3804 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
3805 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
3806 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
3807 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
3810 /* Otherwise, there are some code-specific tests we can make. */
3813 /* Optimize comparisons with upper and lower bounds. */
3814 if (SCALAR_INT_MODE_P (mode
)
3815 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3828 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3835 /* x >= min is always true. */
3836 if (rtx_equal_p (trueop1
, mmin
))
3837 tem
= const_true_rtx
;
3843 /* x <= max is always true. */
3844 if (rtx_equal_p (trueop1
, mmax
))
3845 tem
= const_true_rtx
;
3850 /* x > max is always false. */
3851 if (rtx_equal_p (trueop1
, mmax
))
3857 /* x < min is always false. */
3858 if (rtx_equal_p (trueop1
, mmin
))
3865 if (tem
== const0_rtx
3866 || tem
== const_true_rtx
)
3873 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3878 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3879 return const_true_rtx
;
3883 /* Optimize abs(x) < 0.0. */
3884 if (trueop1
== CONST0_RTX (mode
)
3885 && !HONOR_SNANS (mode
)
3886 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3888 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3890 if (GET_CODE (tem
) == ABS
)
3896 /* Optimize abs(x) >= 0.0. */
3897 if (trueop1
== CONST0_RTX (mode
)
3898 && !HONOR_NANS (mode
)
3899 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3901 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3903 if (GET_CODE (tem
) == ABS
)
3904 return const_true_rtx
;
3909 /* Optimize ! (abs(x) < 0.0). */
3910 if (trueop1
== CONST0_RTX (mode
))
3912 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3914 if (GET_CODE (tem
) == ABS
)
3915 return const_true_rtx
;
3926 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3932 return equal
? const_true_rtx
: const0_rtx
;
3935 return ! equal
? const_true_rtx
: const0_rtx
;
3938 return op0lt
? const_true_rtx
: const0_rtx
;
3941 return op1lt
? const_true_rtx
: const0_rtx
;
3943 return op0ltu
? const_true_rtx
: const0_rtx
;
3945 return op1ltu
? const_true_rtx
: const0_rtx
;
3948 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
3951 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
3953 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
3955 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
3957 return const_true_rtx
;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
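
/* For example, (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 0))
   folds to (const_int 11) when BITS_BIG_ENDIAN is 0: the low four bits of
   the constant are extracted.  */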
3970 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
3971 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
3974 unsigned int width
= GET_MODE_BITSIZE (mode
);
3976 /* VOIDmode means "infinite" precision. */
3978 width
= HOST_BITS_PER_WIDE_INT
;
3984 if (GET_CODE (op0
) == CONST_INT
3985 && GET_CODE (op1
) == CONST_INT
3986 && GET_CODE (op2
) == CONST_INT
3987 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
3988 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
3990 /* Extracting a bit-field from a constant */
3991 HOST_WIDE_INT val
= INTVAL (op0
);
3993 if (BITS_BIG_ENDIAN
)
3994 val
>>= (GET_MODE_BITSIZE (op0_mode
)
3995 - INTVAL (op2
) - INTVAL (op1
));
3997 val
>>= INTVAL (op2
);
3999 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4001 /* First zero-extend. */
4002 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4003 /* If desired, propagate sign bit. */
4004 if (code
== SIGN_EXTRACT
4005 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4006 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4009 /* Clear the bits that don't belong in our mode,
4010 unless they and our sign bit are all one.
4011 So we get either a reasonable negative value or a reasonable
4012 unsigned value for this mode. */
4013 if (width
< HOST_BITS_PER_WIDE_INT
4014 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4015 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4016 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4018 return gen_int_mode (val
, mode
);
4023 if (GET_CODE (op0
) == CONST_INT
)
4024 return op0
!= const0_rtx
? op1
: op2
;
4026 /* Convert c ? a : a into "a". */
4027 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
4030 /* Convert a != b ? a : b into "a". */
4031 if (GET_CODE (op0
) == NE
4032 && ! side_effects_p (op0
)
4033 && ! HONOR_NANS (mode
)
4034 && ! HONOR_SIGNED_ZEROS (mode
)
4035 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4036 && rtx_equal_p (XEXP (op0
, 1), op2
))
4037 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4038 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4041 /* Convert a == b ? a : b into "b". */
4042 if (GET_CODE (op0
) == EQ
4043 && ! side_effects_p (op0
)
4044 && ! HONOR_NANS (mode
)
4045 && ! HONOR_SIGNED_ZEROS (mode
)
4046 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4047 && rtx_equal_p (XEXP (op0
, 1), op2
))
4048 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4049 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4052 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
4054 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
4055 ? GET_MODE (XEXP (op0
, 1))
4056 : GET_MODE (XEXP (op0
, 0)));
4059 /* Look for happy constants in op1 and op2. */
4060 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
4062 HOST_WIDE_INT t
= INTVAL (op1
);
4063 HOST_WIDE_INT f
= INTVAL (op2
);
4065 if (t
== STORE_FLAG_VALUE
&& f
== 0)
4066 code
= GET_CODE (op0
);
4067 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
4070 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4078 return simplify_gen_relational (code
, mode
, cmp_mode
,
4079 XEXP (op0
, 0), XEXP (op0
, 1));
4082 if (cmp_mode
== VOIDmode
)
4083 cmp_mode
= op0_mode
;
4084 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4085 cmp_mode
, XEXP (op0
, 0),
4088 /* See if any simplifications were possible. */
4091 if (GET_CODE (temp
) == CONST_INT
)
4092 return temp
== const0_rtx
? op2
: op1
;
4094 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4100 gcc_assert (GET_MODE (op0
) == mode
);
4101 gcc_assert (GET_MODE (op1
) == mode
);
4102 gcc_assert (VECTOR_MODE_P (mode
));
4103 op2
= avoid_constant_pool_reference (op2
);
4104 if (GET_CODE (op2
) == CONST_INT
)
4106 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4107 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4108 int mask
= (1 << n_elts
) - 1;
4110 if (!(INTVAL (op2
) & mask
))
4112 if ((INTVAL (op2
) & mask
) == mask
)
4115 op0
= avoid_constant_pool_reference (op0
);
4116 op1
= avoid_constant_pool_reference (op1
);
4117 if (GET_CODE (op0
) == CONST_VECTOR
4118 && GET_CODE (op1
) == CONST_VECTOR
)
4120 rtvec v
= rtvec_alloc (n_elts
);
4123 for (i
= 0; i
< n_elts
; i
++)
4124 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4125 ? CONST_VECTOR_ELT (op0
, i
)
4126 : CONST_VECTOR_ELT (op1
, i
));
4127 return gen_rtx_CONST_VECTOR (mode
, v
);
4139 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4140 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4142 Works by unpacking OP into a collection of 8-bit values
4143 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4144 and then repacking them again for OUTERMODE. */
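
/* For example, on a little-endian target (subreg:QI (const_int 0x12345678) 0)
   with SImode as the inner mode unpacks the constant into the byte array
   {0x78, 0x56, 0x34, 0x12}, selects byte 0, and repacks it as
   (const_int 0x78).  */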
4147 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
4148 enum machine_mode innermode
, unsigned int byte
)
4150 /* We support up to 512-bit values (for V8DFmode). */
4154 value_mask
= (1 << value_bit
) - 1
4156 unsigned char value
[max_bitsize
/ value_bit
];
4165 rtvec result_v
= NULL
;
4166 enum mode_class outer_class
;
4167 enum machine_mode outer_submode
;
4169 /* Some ports misuse CCmode. */
4170 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
4173 /* We have no way to represent a complex constant at the rtl level. */
4174 if (COMPLEX_MODE_P (outermode
))
4177 /* Unpack the value. */
4179 if (GET_CODE (op
) == CONST_VECTOR
)
4181 num_elem
= CONST_VECTOR_NUNITS (op
);
4182 elems
= &CONST_VECTOR_ELT (op
, 0);
4183 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4189 elem_bitsize
= max_bitsize
;
4191 /* If this asserts, it is too complicated; reducing value_bit may help. */
4192 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4193 /* I don't know how to handle endianness of sub-units. */
4194 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
4196 for (elem
= 0; elem
< num_elem
; elem
++)
4199 rtx el
= elems
[elem
];
4201 /* Vectors are kept in target memory order. (This is probably
4204 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4205 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4207 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4208 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4209 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4210 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4211 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4214 switch (GET_CODE (el
))
4218 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4220 *vp
++ = INTVAL (el
) >> i
;
4221 /* CONST_INTs are always logically sign-extended. */
4222 for (; i
< elem_bitsize
; i
+= value_bit
)
4223 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
4227 if (GET_MODE (el
) == VOIDmode
)
4229 /* If this triggers, someone should have generated a
4230 CONST_INT instead. */
4231 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
4233 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
4234 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
4235 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
4238 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
4241 /* It shouldn't matter what's done here, so fill it with
4243 for (; i
< elem_bitsize
; i
+= value_bit
)
4248 long tmp
[max_bitsize
/ 32];
4249 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
4251 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
4252 gcc_assert (bitsize
<= elem_bitsize
);
4253 gcc_assert (bitsize
% value_bit
== 0);
4255 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
4258 /* real_to_target produces its result in words affected by
4259 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4260 and use WORDS_BIG_ENDIAN instead; see the documentation
4261 of SUBREG in rtl.texi. */
4262 for (i
= 0; i
< bitsize
; i
+= value_bit
)
4265 if (WORDS_BIG_ENDIAN
)
4266 ibase
= bitsize
- 1 - i
;
4269 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
4272 /* It shouldn't matter what's done here, so fill it with
4274 for (; i
< elem_bitsize
; i
+= value_bit
)
4284 /* Now, pick the right byte to start with. */
4285 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4286 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4287 will already have offset 0. */
4288 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
4290 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
4292 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4293 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4294 byte
= (subword_byte
% UNITS_PER_WORD
4295 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4298 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4299 so if it's become negative it will instead be very large.) */
4300 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4302 /* Convert from bytes to chunks of size value_bit. */
4303 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
4305 /* Re-pack the value. */
4307 if (VECTOR_MODE_P (outermode
))
4309 num_elem
= GET_MODE_NUNITS (outermode
);
4310 result_v
= rtvec_alloc (num_elem
);
4311 elems
= &RTVEC_ELT (result_v
, 0);
4312 outer_submode
= GET_MODE_INNER (outermode
);
4318 outer_submode
= outermode
;
4321 outer_class
= GET_MODE_CLASS (outer_submode
);
4322 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
4324 gcc_assert (elem_bitsize
% value_bit
== 0);
4325 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
4327 for (elem
= 0; elem
< num_elem
; elem
++)
4331 /* Vectors are stored in target memory order. (This is probably
4334 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4335 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4337 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4338 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4339 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4340 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4341 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4344 switch (outer_class
)
4347 case MODE_PARTIAL_INT
:
4349 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
4352 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4354 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
4355 for (; i
< elem_bitsize
; i
+= value_bit
)
4356 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
4357 << (i
- HOST_BITS_PER_WIDE_INT
));
4359 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4361 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
4362 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
4363 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
4364 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
4371 case MODE_DECIMAL_FLOAT
:
4374 long tmp
[max_bitsize
/ 32];
4376 /* real_from_target wants its input in words affected by
4377 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4378 and use WORDS_BIG_ENDIAN instead; see the documentation
4379 of SUBREG in rtl.texi. */
4380 for (i
= 0; i
< max_bitsize
/ 32; i
++)
4382 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
4385 if (WORDS_BIG_ENDIAN
)
4386 ibase
= elem_bitsize
- 1 - i
;
4389 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
4392 real_from_target (&r
, tmp
, outer_submode
);
4393 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
4401 if (VECTOR_MODE_P (outermode
))
4402 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
4407 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4408 Return 0 if no simplifications are possible. */
4410 simplify_subreg (enum machine_mode outermode
, rtx op
,
4411 enum machine_mode innermode
, unsigned int byte
)
4413 /* Little bit of sanity checking. */
4414 gcc_assert (innermode
!= VOIDmode
);
4415 gcc_assert (outermode
!= VOIDmode
);
4416 gcc_assert (innermode
!= BLKmode
);
4417 gcc_assert (outermode
!= BLKmode
);
4419 gcc_assert (GET_MODE (op
) == innermode
4420 || GET_MODE (op
) == VOIDmode
);
4422 gcc_assert ((byte
% GET_MODE_SIZE (outermode
)) == 0);
4423 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4425 if (outermode
== innermode
&& !byte
)
4428 if (GET_CODE (op
) == CONST_INT
4429 || GET_CODE (op
) == CONST_DOUBLE
4430 || GET_CODE (op
) == CONST_VECTOR
)
4431 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* For a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
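  /* Illustration (not in the original source): a lowpart HImode subreg of
     (truncate:SI (reg:DI X)) becomes (truncate:HI (reg:DI X)), folding the
     implicit and explicit truncations into one.  */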
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
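  /* Illustration (not in the original source, target-dependent): on a
     hypothetical little-endian target where hard registers 0 and 1 together
     hold a DImode value, (subreg:SI (reg:DI 0) 4) can be rewritten as
     (reg:SI 1), provided HARD_REGNO_MODE_OK accepts SImode in register 1.  */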
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
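  /* Illustration (not in the original source): for OP = (concat:SC RE IM)
     with SFmode parts, a request for (subreg:SF OP 4) selects XEXP (op, 1),
     i.e. the imaginary part IM, with a final offset of zero.  */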
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
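  /* Illustration (not in the original source): the lowpart QImode subreg of
     (zero_extend:SI (reg:QI X)) is (reg:QI X) itself (the first possibility
     above), while a subreg that selects only bits above the QImode source
     folds to (const_int 0) by the ZERO_EXTEND rule just above.  */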
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
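/* Illustration (not in the original source): simplify_gen_subreg (QImode, X,
   SImode, 0) with X a pseudo register cannot be folded further, so it falls
   through to gen_rtx_SUBREG and yields (subreg:QI (reg:SI X) 0), assuming
   validate_subreg accepts the combination.  */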
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))