1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx
neg_const_int (enum machine_mode
, const_rtx
);
53 static bool plus_minus_operand_p (const_rtx
);
54 static bool simplify_plus_minus_op_data_cmp (rtx
, rtx
);
55 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
56 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
58 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
60 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
61 enum machine_mode
, rtx
, rtx
);
62 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
63 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
69 neg_const_int (enum machine_mode mode
, const_rtx i
)
71 return gen_int_mode (- INTVAL (i
), mode
);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
78 mode_signbit_p (enum machine_mode mode
, const_rtx x
)
80 unsigned HOST_WIDE_INT val
;
83 if (GET_MODE_CLASS (mode
) != MODE_INT
)
86 width
= GET_MODE_BITSIZE (mode
);
90 if (width
<= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x
) == CONST_INT
)
93 else if (width
<= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x
) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x
) == 0)
97 val
= CONST_DOUBLE_HIGH (x
);
98 width
-= HOST_BITS_PER_WIDE_INT
;
103 if (width
< HOST_BITS_PER_WIDE_INT
)
104 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
105 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
117 /* If this simplifies, do it. */
118 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0
, op1
))
125 tem
= op0
, op0
= op1
, op1
= tem
;
127 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x
)
136 enum machine_mode cmode
;
137 HOST_WIDE_INT offset
= 0;
139 switch (GET_CODE (x
))
145 /* Handle float extensions of constant pool references. */
147 c
= avoid_constant_pool_reference (tmp
);
148 if (c
!= tmp
&& GET_CODE (c
) == CONST_DOUBLE
)
152 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
161 if (GET_MODE (x
) == BLKmode
)
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr
= targetm
.delegitimize_address (addr
);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr
) == CONST
171 && GET_CODE (XEXP (addr
, 0)) == PLUS
172 && GET_CODE (XEXP (XEXP (addr
, 0), 1)) == CONST_INT
)
174 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
175 addr
= XEXP (XEXP (addr
, 0), 0);
178 if (GET_CODE (addr
) == LO_SUM
)
179 addr
= XEXP (addr
, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr
) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr
))
186 c
= get_pool_constant (addr
);
187 cmode
= get_pool_mode (addr
);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset
!= 0 || cmode
!= GET_MODE (x
))
194 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
195 if (tem
&& CONSTANT_P (tem
))
205 /* Make a unary operation by first seeing if it folds and otherwise making
206 the specified operation. */
209 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
210 enum machine_mode op_mode
)
214 /* If this simplifies, use it. */
215 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
218 return gen_rtx_fmt_e (code
, mode
, op
);
221 /* Likewise for ternary operations. */
224 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
225 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
229 /* If this simplifies, use it. */
230 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
234 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
237 /* Likewise, for relational operations.
238 CMP_MODE specifies mode comparison is done in. */
241 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
242 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
246 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
250 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
253 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
257 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
259 enum rtx_code code
= GET_CODE (x
);
260 enum machine_mode mode
= GET_MODE (x
);
261 enum machine_mode op_mode
;
264 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
265 to build a new expression substituting recursively. If we can't do
266 anything, return our input. */
271 switch (GET_RTX_CLASS (code
))
275 op_mode
= GET_MODE (op0
);
276 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
277 if (op0
== XEXP (x
, 0))
279 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
283 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
284 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
285 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
287 return simplify_gen_binary (code
, mode
, op0
, op1
);
290 case RTX_COMM_COMPARE
:
293 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
294 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
295 op1
= simplify_replace_rtx (op1
, old_rtx
, new_rtx
);
296 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
298 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
301 case RTX_BITFIELD_OPS
:
303 op_mode
= GET_MODE (op0
);
304 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
305 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
306 op2
= simplify_replace_rtx (XEXP (x
, 2), old_rtx
, new_rtx
);
307 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
309 if (op_mode
== VOIDmode
)
310 op_mode
= GET_MODE (op0
);
311 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
314 /* The only case we try to handle is a SUBREG. */
317 op0
= simplify_replace_rtx (SUBREG_REG (x
), old_rtx
, new_rtx
);
318 if (op0
== SUBREG_REG (x
))
320 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
321 GET_MODE (SUBREG_REG (x
)),
323 return op0
? op0
: x
;
330 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
331 if (op0
== XEXP (x
, 0))
333 return replace_equiv_address_nv (x
, op0
);
335 else if (code
== LO_SUM
)
337 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
338 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
340 /* (lo_sum (high x) x) -> x */
341 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
344 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
346 return gen_rtx_LO_SUM (mode
, op0
, op1
);
348 else if (code
== REG
)
350 if (rtx_equal_p (x
, old_rtx
))
361 /* Try to simplify a unary operation CODE whose output mode is to be
362 MODE with input operand OP whose mode was originally OP_MODE.
363 Return zero if no simplification can be made. */
365 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
366 rtx op
, enum machine_mode op_mode
)
370 if (GET_CODE (op
) == CONST
)
373 trueop
= avoid_constant_pool_reference (op
);
375 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
379 return simplify_unary_operation_1 (code
, mode
, op
);
382 /* Perform some simplifications we can do even if the operands
385 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
387 enum rtx_code reversed
;
393 /* (not (not X)) == X. */
394 if (GET_CODE (op
) == NOT
)
397 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
398 comparison is all ones. */
399 if (COMPARISON_P (op
)
400 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
401 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
402 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
403 XEXP (op
, 0), XEXP (op
, 1));
405 /* (not (plus X -1)) can become (neg X). */
406 if (GET_CODE (op
) == PLUS
407 && XEXP (op
, 1) == constm1_rtx
)
408 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
410 /* Similarly, (not (neg X)) is (plus X -1). */
411 if (GET_CODE (op
) == NEG
)
412 return plus_constant (XEXP (op
, 0), -1);
414 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
415 if (GET_CODE (op
) == XOR
416 && GET_CODE (XEXP (op
, 1)) == CONST_INT
417 && (temp
= simplify_unary_operation (NOT
, mode
,
418 XEXP (op
, 1), mode
)) != 0)
419 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
421 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
422 if (GET_CODE (op
) == PLUS
423 && GET_CODE (XEXP (op
, 1)) == CONST_INT
424 && mode_signbit_p (mode
, XEXP (op
, 1))
425 && (temp
= simplify_unary_operation (NOT
, mode
,
426 XEXP (op
, 1), mode
)) != 0)
427 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
430 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
431 operands other than 1, but that is not valid. We could do a
432 similar simplification for (not (lshiftrt C X)) where C is
433 just the sign bit, but this doesn't seem common enough to
435 if (GET_CODE (op
) == ASHIFT
436 && XEXP (op
, 0) == const1_rtx
)
438 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
439 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
446 if (STORE_FLAG_VALUE
== -1
447 && GET_CODE (op
) == ASHIFTRT
448 && GET_CODE (XEXP (op
, 1)) == CONST_INT
449 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
450 return simplify_gen_relational (GE
, mode
, VOIDmode
,
451 XEXP (op
, 0), const0_rtx
);
454 if (GET_CODE (op
) == SUBREG
455 && subreg_lowpart_p (op
)
456 && (GET_MODE_SIZE (GET_MODE (op
))
457 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
458 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
459 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
461 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
464 x
= gen_rtx_ROTATE (inner_mode
,
465 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
467 XEXP (SUBREG_REG (op
), 1));
468 return rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
471 /* Apply De Morgan's laws to reduce number of patterns for machines
472 with negating logical insns (and-not, nand, etc.). If result has
473 only one NOT, put it first, since that is how the patterns are
476 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
478 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
479 enum machine_mode op_mode
;
481 op_mode
= GET_MODE (in1
);
482 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
484 op_mode
= GET_MODE (in2
);
485 if (op_mode
== VOIDmode
)
487 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
489 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
492 in2
= in1
; in1
= tem
;
495 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
501 /* (neg (neg X)) == X. */
502 if (GET_CODE (op
) == NEG
)
505 /* (neg (plus X 1)) can become (not X). */
506 if (GET_CODE (op
) == PLUS
507 && XEXP (op
, 1) == const1_rtx
)
508 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
510 /* Similarly, (neg (not X)) is (plus X 1). */
511 if (GET_CODE (op
) == NOT
)
512 return plus_constant (XEXP (op
, 0), 1);
514 /* (neg (minus X Y)) can become (minus Y X). This transformation
515 isn't safe for modes with signed zeros, since if X and Y are
516 both +0, (minus Y X) is the same as (minus X Y). If the
517 rounding mode is towards +infinity (or -infinity) then the two
518 expressions will be rounded differently. */
519 if (GET_CODE (op
) == MINUS
520 && !HONOR_SIGNED_ZEROS (mode
)
521 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
522 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
524 if (GET_CODE (op
) == PLUS
525 && !HONOR_SIGNED_ZEROS (mode
)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
528 /* (neg (plus A C)) is simplified to (minus -C A). */
529 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
530 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
532 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
534 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
537 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
538 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
539 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
542 /* (neg (mult A B)) becomes (mult (neg A) B).
543 This works even for floating-point values. */
544 if (GET_CODE (op
) == MULT
545 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
547 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
548 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
551 /* NEG commutes with ASHIFT since it is multiplication. Only do
552 this if we can then eliminate the NEG (e.g., if the operand
554 if (GET_CODE (op
) == ASHIFT
)
556 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
558 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
561 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
562 C is equal to the width of MODE minus 1. */
563 if (GET_CODE (op
) == ASHIFTRT
564 && GET_CODE (XEXP (op
, 1)) == CONST_INT
565 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
566 return simplify_gen_binary (LSHIFTRT
, mode
,
567 XEXP (op
, 0), XEXP (op
, 1));
569 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
570 C is equal to the width of MODE minus 1. */
571 if (GET_CODE (op
) == LSHIFTRT
572 && GET_CODE (XEXP (op
, 1)) == CONST_INT
573 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
574 return simplify_gen_binary (ASHIFTRT
, mode
,
575 XEXP (op
, 0), XEXP (op
, 1));
577 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
578 if (GET_CODE (op
) == XOR
579 && XEXP (op
, 1) == const1_rtx
580 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
581 return plus_constant (XEXP (op
, 0), -1);
583 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
584 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
585 if (GET_CODE (op
) == LT
586 && XEXP (op
, 1) == const0_rtx
587 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
589 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
590 int isize
= GET_MODE_BITSIZE (inner
);
591 if (STORE_FLAG_VALUE
== 1)
593 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
594 GEN_INT (isize
- 1));
597 if (GET_MODE_BITSIZE (mode
) > isize
)
598 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
599 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
601 else if (STORE_FLAG_VALUE
== -1)
603 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
604 GEN_INT (isize
- 1));
607 if (GET_MODE_BITSIZE (mode
) > isize
)
608 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
609 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
615 /* We can't handle truncation to a partial integer mode here
616 because we don't know the real bitsize of the partial
618 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
621 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
622 if ((GET_CODE (op
) == SIGN_EXTEND
623 || GET_CODE (op
) == ZERO_EXTEND
)
624 && GET_MODE (XEXP (op
, 0)) == mode
)
627 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
628 (OP:SI foo:SI) if OP is NEG or ABS. */
629 if ((GET_CODE (op
) == ABS
630 || GET_CODE (op
) == NEG
)
631 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
632 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
633 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
634 return simplify_gen_unary (GET_CODE (op
), mode
,
635 XEXP (XEXP (op
, 0), 0), mode
);
637 /* (truncate:A (subreg:B (truncate:C X) 0)) is
639 if (GET_CODE (op
) == SUBREG
640 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
641 && subreg_lowpart_p (op
))
642 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
643 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
645 /* If we know that the value is already truncated, we can
646 replace the TRUNCATE with a SUBREG. Note that this is also
647 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
648 modes we just have to apply a different definition for
649 truncation. But don't do this for an (LSHIFTRT (MULT ...))
650 since this will cause problems with the umulXi3_highpart
652 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
653 GET_MODE_BITSIZE (GET_MODE (op
)))
654 ? (num_sign_bit_copies (op
, GET_MODE (op
))
655 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op
))
656 - GET_MODE_BITSIZE (mode
)))
657 : truncated_to_mode (mode
, op
))
658 && ! (GET_CODE (op
) == LSHIFTRT
659 && GET_CODE (XEXP (op
, 0)) == MULT
))
660 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
662 /* A truncate of a comparison can be replaced with a subreg if
663 STORE_FLAG_VALUE permits. This is like the previous test,
664 but it works even if the comparison is done in a mode larger
665 than HOST_BITS_PER_WIDE_INT. */
666 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
668 && ((HOST_WIDE_INT
) STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
669 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
673 if (DECIMAL_FLOAT_MODE_P (mode
))
676 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
677 if (GET_CODE (op
) == FLOAT_EXTEND
678 && GET_MODE (XEXP (op
, 0)) == mode
)
681 /* (float_truncate:SF (float_truncate:DF foo:XF))
682 = (float_truncate:SF foo:XF).
683 This may eliminate double rounding, so it is unsafe.
685 (float_truncate:SF (float_extend:XF foo:DF))
686 = (float_truncate:SF foo:DF).
688 (float_truncate:DF (float_extend:XF foo:SF))
689 = (float_extend:SF foo:DF). */
690 if ((GET_CODE (op
) == FLOAT_TRUNCATE
691 && flag_unsafe_math_optimizations
)
692 || GET_CODE (op
) == FLOAT_EXTEND
)
693 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
695 > GET_MODE_SIZE (mode
)
696 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
700 /* (float_truncate (float x)) is (float x) */
701 if (GET_CODE (op
) == FLOAT
702 && (flag_unsafe_math_optimizations
703 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
704 && ((unsigned)significand_size (GET_MODE (op
))
705 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
706 - num_sign_bit_copies (XEXP (op
, 0),
707 GET_MODE (XEXP (op
, 0))))))))
708 return simplify_gen_unary (FLOAT
, mode
,
710 GET_MODE (XEXP (op
, 0)));
712 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
713 (OP:SF foo:SF) if OP is NEG or ABS. */
714 if ((GET_CODE (op
) == ABS
715 || GET_CODE (op
) == NEG
)
716 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
717 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
718 return simplify_gen_unary (GET_CODE (op
), mode
,
719 XEXP (XEXP (op
, 0), 0), mode
);
721 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
722 is (float_truncate:SF x). */
723 if (GET_CODE (op
) == SUBREG
724 && subreg_lowpart_p (op
)
725 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
726 return SUBREG_REG (op
);
730 if (DECIMAL_FLOAT_MODE_P (mode
))
733 /* (float_extend (float_extend x)) is (float_extend x)
735 (float_extend (float x)) is (float x) assuming that double
736 rounding can't happen.
738 if (GET_CODE (op
) == FLOAT_EXTEND
739 || (GET_CODE (op
) == FLOAT
740 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
741 && ((unsigned)significand_size (GET_MODE (op
))
742 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
743 - num_sign_bit_copies (XEXP (op
, 0),
744 GET_MODE (XEXP (op
, 0)))))))
745 return simplify_gen_unary (GET_CODE (op
), mode
,
747 GET_MODE (XEXP (op
, 0)));
752 /* (abs (neg <foo>)) -> (abs <foo>) */
753 if (GET_CODE (op
) == NEG
)
754 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
755 GET_MODE (XEXP (op
, 0)));
757 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
759 if (GET_MODE (op
) == VOIDmode
)
762 /* If operand is something known to be positive, ignore the ABS. */
763 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
764 || ((GET_MODE_BITSIZE (GET_MODE (op
))
765 <= HOST_BITS_PER_WIDE_INT
)
766 && ((nonzero_bits (op
, GET_MODE (op
))
768 << (GET_MODE_BITSIZE (GET_MODE (op
)) - 1)))
772 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
773 if (num_sign_bit_copies (op
, mode
) == GET_MODE_BITSIZE (mode
))
774 return gen_rtx_NEG (mode
, op
);
779 /* (ffs (*_extend <X>)) = (ffs <X>) */
780 if (GET_CODE (op
) == SIGN_EXTEND
781 || GET_CODE (op
) == ZERO_EXTEND
)
782 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
783 GET_MODE (XEXP (op
, 0)));
787 switch (GET_CODE (op
))
791 /* (popcount (zero_extend <X>)) = (popcount <X>) */
792 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
793 GET_MODE (XEXP (op
, 0)));
797 /* Rotations don't affect popcount. */
798 if (!side_effects_p (XEXP (op
, 1)))
799 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
800 GET_MODE (XEXP (op
, 0)));
809 switch (GET_CODE (op
))
815 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
816 GET_MODE (XEXP (op
, 0)));
820 /* Rotations don't affect parity. */
821 if (!side_effects_p (XEXP (op
, 1)))
822 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
823 GET_MODE (XEXP (op
, 0)));
832 /* (bswap (bswap x)) -> x. */
833 if (GET_CODE (op
) == BSWAP
)
838 /* (float (sign_extend <X>)) = (float <X>). */
839 if (GET_CODE (op
) == SIGN_EXTEND
)
840 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
841 GET_MODE (XEXP (op
, 0)));
845 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
846 becomes just the MINUS if its mode is MODE. This allows
847 folding switch statements on machines using casesi (such as
849 if (GET_CODE (op
) == TRUNCATE
850 && GET_MODE (XEXP (op
, 0)) == mode
851 && GET_CODE (XEXP (op
, 0)) == MINUS
852 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
853 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
856 /* Check for a sign extension of a subreg of a promoted
857 variable, where the promotion is sign-extended, and the
858 target mode is the same as the variable's promotion. */
859 if (GET_CODE (op
) == SUBREG
860 && SUBREG_PROMOTED_VAR_P (op
)
861 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
862 && GET_MODE (XEXP (op
, 0)) == mode
)
865 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
866 if (! POINTERS_EXTEND_UNSIGNED
867 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
869 || (GET_CODE (op
) == SUBREG
870 && REG_P (SUBREG_REG (op
))
871 && REG_POINTER (SUBREG_REG (op
))
872 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
873 return convert_memory_address (Pmode
, op
);
878 /* Check for a zero extension of a subreg of a promoted
879 variable, where the promotion is zero-extended, and the
880 target mode is the same as the variable's promotion. */
881 if (GET_CODE (op
) == SUBREG
882 && SUBREG_PROMOTED_VAR_P (op
)
883 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
884 && GET_MODE (XEXP (op
, 0)) == mode
)
887 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
888 if (POINTERS_EXTEND_UNSIGNED
> 0
889 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
891 || (GET_CODE (op
) == SUBREG
892 && REG_P (SUBREG_REG (op
))
893 && REG_POINTER (SUBREG_REG (op
))
894 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
895 return convert_memory_address (Pmode
, op
);
906 /* Try to compute the value of a unary operation CODE whose output mode is to
907 be MODE with input operand OP whose mode was originally OP_MODE.
908 Return zero if the value cannot be computed. */
910 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
911 rtx op
, enum machine_mode op_mode
)
913 unsigned int width
= GET_MODE_BITSIZE (mode
);
915 if (code
== VEC_DUPLICATE
)
917 gcc_assert (VECTOR_MODE_P (mode
));
918 if (GET_MODE (op
) != VOIDmode
)
920 if (!VECTOR_MODE_P (GET_MODE (op
)))
921 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
923 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
926 if (GET_CODE (op
) == CONST_INT
|| GET_CODE (op
) == CONST_DOUBLE
927 || GET_CODE (op
) == CONST_VECTOR
)
929 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
930 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
931 rtvec v
= rtvec_alloc (n_elts
);
934 if (GET_CODE (op
) != CONST_VECTOR
)
935 for (i
= 0; i
< n_elts
; i
++)
936 RTVEC_ELT (v
, i
) = op
;
939 enum machine_mode inmode
= GET_MODE (op
);
940 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
941 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
943 gcc_assert (in_n_elts
< n_elts
);
944 gcc_assert ((n_elts
% in_n_elts
) == 0);
945 for (i
= 0; i
< n_elts
; i
++)
946 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
948 return gen_rtx_CONST_VECTOR (mode
, v
);
952 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
954 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
955 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
956 enum machine_mode opmode
= GET_MODE (op
);
957 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
958 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
959 rtvec v
= rtvec_alloc (n_elts
);
962 gcc_assert (op_n_elts
== n_elts
);
963 for (i
= 0; i
< n_elts
; i
++)
965 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
966 CONST_VECTOR_ELT (op
, i
),
967 GET_MODE_INNER (opmode
));
970 RTVEC_ELT (v
, i
) = x
;
972 return gen_rtx_CONST_VECTOR (mode
, v
);
975 /* The order of these tests is critical so that, for example, we don't
976 check the wrong mode (input vs. output) for a conversion operation,
977 such as FIX. At some point, this should be simplified. */
979 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
980 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
982 HOST_WIDE_INT hv
, lv
;
985 if (GET_CODE (op
) == CONST_INT
)
986 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
988 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
990 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
991 d
= real_value_truncate (mode
, d
);
992 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
994 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
995 && (GET_CODE (op
) == CONST_DOUBLE
996 || GET_CODE (op
) == CONST_INT
))
998 HOST_WIDE_INT hv
, lv
;
1001 if (GET_CODE (op
) == CONST_INT
)
1002 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1004 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1006 if (op_mode
== VOIDmode
)
1008 /* We don't know how to interpret negative-looking numbers in
1009 this case, so don't try to fold those. */
1013 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
1016 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1018 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1019 d
= real_value_truncate (mode
, d
);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1023 if (GET_CODE (op
) == CONST_INT
1024 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1026 HOST_WIDE_INT arg0
= INTVAL (op
);
1040 val
= (arg0
>= 0 ? arg0
: - arg0
);
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0
&= GET_MODE_MASK (mode
);
1047 val
= exact_log2 (arg0
& (- arg0
)) + 1;
1051 arg0
&= GET_MODE_MASK (mode
);
1052 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1055 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
1059 arg0
&= GET_MODE_MASK (mode
);
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1065 val
= GET_MODE_BITSIZE (mode
);
1068 val
= exact_log2 (arg0
& -arg0
);
1072 arg0
&= GET_MODE_MASK (mode
);
1075 val
++, arg0
&= arg0
- 1;
1079 arg0
&= GET_MODE_MASK (mode
);
1082 val
++, arg0
&= arg0
- 1;
1091 for (s
= 0; s
< width
; s
+= 8)
1093 unsigned int d
= width
- s
- 8;
1094 unsigned HOST_WIDE_INT byte
;
1095 byte
= (arg0
>> s
) & 0xff;
1106 /* When zero-extending a CONST_INT, we need to know its
1108 gcc_assert (op_mode
!= VOIDmode
);
1109 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1117 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1118 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1124 if (op_mode
== VOIDmode
)
1126 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1134 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1137 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1139 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
1140 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1148 case FLOAT_TRUNCATE
:
1159 return gen_int_mode (val
, mode
);
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op
) == VOIDmode
1165 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1166 && (GET_CODE (op
) == CONST_DOUBLE
1167 || GET_CODE (op
) == CONST_INT
))
1169 unsigned HOST_WIDE_INT l1
, lv
;
1170 HOST_WIDE_INT h1
, hv
;
1172 if (GET_CODE (op
) == CONST_DOUBLE
)
1173 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1175 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1185 neg_double (l1
, h1
, &lv
, &hv
);
1190 neg_double (l1
, h1
, &lv
, &hv
);
1202 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
1205 lv
= exact_log2 (l1
& -l1
) + 1;
1211 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
1212 - HOST_BITS_PER_WIDE_INT
;
1214 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1216 lv
= GET_MODE_BITSIZE (mode
);
1222 lv
= exact_log2 (l1
& -l1
);
1224 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1226 lv
= GET_MODE_BITSIZE (mode
);
1254 for (s
= 0; s
< width
; s
+= 8)
1256 unsigned int d
= width
- s
- 8;
1257 unsigned HOST_WIDE_INT byte
;
1259 if (s
< HOST_BITS_PER_WIDE_INT
)
1260 byte
= (l1
>> s
) & 0xff;
1262 byte
= (h1
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1264 if (d
< HOST_BITS_PER_WIDE_INT
)
1267 hv
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1273 /* This is just a change-of-mode, so do nothing. */
1278 gcc_assert (op_mode
!= VOIDmode
);
1280 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1284 lv
= l1
& GET_MODE_MASK (op_mode
);
1288 if (op_mode
== VOIDmode
1289 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1293 lv
= l1
& GET_MODE_MASK (op_mode
);
1294 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
1295 && (lv
& ((HOST_WIDE_INT
) 1
1296 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
1297 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1299 hv
= HWI_SIGN_EXTEND (lv
);
1310 return immed_double_const (lv
, hv
, mode
);
1313 else if (GET_CODE (op
) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode
))
1316 REAL_VALUE_TYPE d
, t
;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1322 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1324 real_sqrt (&t
, mode
, &d
);
1328 d
= REAL_VALUE_ABS (d
);
1331 d
= REAL_VALUE_NEGATE (d
);
1333 case FLOAT_TRUNCATE
:
1334 d
= real_value_truncate (mode
, d
);
1337 /* All this does is change the mode. */
1340 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1347 real_to_target (tmp
, &d
, GET_MODE (op
));
1348 for (i
= 0; i
< 4; i
++)
1350 real_from_target (&d
, tmp
, mode
);
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1359 else if (GET_CODE (op
) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1361 && GET_MODE_CLASS (mode
) == MODE_INT
1362 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
1371 HOST_WIDE_INT xh
, xl
, th
, tl
;
1372 REAL_VALUE_TYPE x
, t
;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1377 if (REAL_VALUE_ISNAN (x
))
1380 /* Test against the signed upper bound. */
1381 if (width
> HOST_BITS_PER_WIDE_INT
)
1383 th
= ((unsigned HOST_WIDE_INT
) 1
1384 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1390 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1392 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1393 if (REAL_VALUES_LESS (t
, x
))
1400 /* Test against the signed lower bound. */
1401 if (width
> HOST_BITS_PER_WIDE_INT
)
1403 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1409 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1411 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1412 if (REAL_VALUES_LESS (x
, t
))
1418 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1422 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1425 /* Test against the unsigned upper bound. */
1426 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1431 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1433 th
= ((unsigned HOST_WIDE_INT
) 1
1434 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1440 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1442 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1443 if (REAL_VALUES_LESS (t
, x
))
1450 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1456 return immed_double_const (xl
, xh
, mode
);
1462 /* Subroutine of simplify_binary_operation to simplify a commutative,
1463 associative binary operation CODE with result mode MODE, operating
1464 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1465 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1466 canonicalization is possible. */
1469 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1474 /* Linearize the operator to the left. */
1475 if (GET_CODE (op1
) == code
)
1477 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1478 if (GET_CODE (op0
) == code
)
1480 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1481 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1484 /* "a op (b op c)" becomes "(b op c) op a". */
1485 if (! swap_commutative_operands_p (op1
, op0
))
1486 return simplify_gen_binary (code
, mode
, op1
, op0
);
1493 if (GET_CODE (op0
) == code
)
1495 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1496 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
1498 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1499 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1502 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1503 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1505 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1507 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1508 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
1510 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1517 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1518 and OP1. Return 0 if no simplification is possible.
1520 Don't use this for relational operations such as EQ or LT.
1521 Use simplify_relational_operation instead. */
1523 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1526 rtx trueop0
, trueop1
;
1529 /* Relational operations don't work here. We must know the mode
1530 of the operands in order to do the comparison correctly.
1531 Assuming a full word can give incorrect results.
1532 Consider comparing 128 with -128 in QImode. */
1533 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
1534 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
1536 /* Make sure the constant is second. */
1537 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
1538 && swap_commutative_operands_p (op0
, op1
))
1540 tem
= op0
, op0
= op1
, op1
= tem
;
1543 trueop0
= avoid_constant_pool_reference (op0
);
1544 trueop1
= avoid_constant_pool_reference (op1
);
1546 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
1549 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1558 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
1559 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1561 rtx tem
, reversed
, opleft
, opright
;
1563 unsigned int width
= GET_MODE_BITSIZE (mode
);
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0
) == NEG
)
1581 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1582 else if (GET_CODE (op1
) == NEG
)
1583 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode
)
1587 && GET_CODE (op0
) == NOT
1588 && trueop1
== const1_rtx
)
1589 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1597 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1598 && GET_CODE (op1
) == CONST_INT
)
1599 return plus_constant (op0
, INTVAL (op1
));
1600 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1601 && GET_CODE (op0
) == CONST_INT
)
1602 return plus_constant (op1
, INTVAL (op0
));
1604 /* See if this is something like X * C - X or vice versa or
1605 if the multiplication is written as a shift. If so, we can
1606 distribute and make a new multiply, shift, or maybe just
1607 have X (if C is 2 in the example above). But don't make
1608 something more expensive than we had before. */
1610 if (SCALAR_INT_MODE_P (mode
))
1612 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1613 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1614 rtx lhs
= op0
, rhs
= op1
;
1616 if (GET_CODE (lhs
) == NEG
)
1620 lhs
= XEXP (lhs
, 0);
1622 else if (GET_CODE (lhs
) == MULT
1623 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1625 coeff0l
= INTVAL (XEXP (lhs
, 1));
1626 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1627 lhs
= XEXP (lhs
, 0);
1629 else if (GET_CODE (lhs
) == ASHIFT
1630 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1631 && INTVAL (XEXP (lhs
, 1)) >= 0
1632 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1634 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1636 lhs
= XEXP (lhs
, 0);
1639 if (GET_CODE (rhs
) == NEG
)
1643 rhs
= XEXP (rhs
, 0);
1645 else if (GET_CODE (rhs
) == MULT
1646 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1648 coeff1l
= INTVAL (XEXP (rhs
, 1));
1649 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1650 rhs
= XEXP (rhs
, 0);
1652 else if (GET_CODE (rhs
) == ASHIFT
1653 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1654 && INTVAL (XEXP (rhs
, 1)) >= 0
1655 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1657 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1659 rhs
= XEXP (rhs
, 0);
1662 if (rtx_equal_p (lhs
, rhs
))
1664 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1666 unsigned HOST_WIDE_INT l
;
1669 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1670 coeff
= immed_double_const (l
, h
, mode
);
1672 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1673 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1678 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1679 if ((GET_CODE (op1
) == CONST_INT
1680 || GET_CODE (op1
) == CONST_DOUBLE
)
1681 && GET_CODE (op0
) == XOR
1682 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1683 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1684 && mode_signbit_p (mode
, op1
))
1685 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1686 simplify_gen_binary (XOR
, mode
, op1
,
1689 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1690 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1691 && GET_CODE (op0
) == MULT
1692 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1696 in1
= XEXP (XEXP (op0
, 0), 0);
1697 in2
= XEXP (op0
, 1);
1698 return simplify_gen_binary (MINUS
, mode
, op1
,
1699 simplify_gen_binary (MULT
, mode
,
1703 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1704 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1706 if (COMPARISON_P (op0
)
1707 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1708 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1709 && (reversed
= reversed_comparison (op0
, mode
)))
1711 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1713 /* If one of the operands is a PLUS or a MINUS, see if we can
1714 simplify this by the associative law.
1715 Don't use the associative law for floating point.
1716 The inaccuracy makes it nonassociative,
1717 and subtle programs can break if operations are associated. */
1719 if (INTEGRAL_MODE_P (mode
)
1720 && (plus_minus_operand_p (op0
)
1721 || plus_minus_operand_p (op1
))
1722 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1725 /* Reassociate floating point addition only when the user
1726 specifies associative math operations. */
1727 if (FLOAT_MODE_P (mode
)
1728 && flag_associative_math
)
1730 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1738 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1739 using cc0, in which case we want to leave it as a COMPARE
1740 so we can distinguish it from a register-register-copy.
1742 In IEEE floating point, x-0 is not the same as x. */
1744 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1745 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1746 && trueop1
== CONST0_RTX (mode
))
1750 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1751 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1752 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1753 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1755 rtx xop00
= XEXP (op0
, 0);
1756 rtx xop10
= XEXP (op1
, 0);
1759 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1761 if (REG_P (xop00
) && REG_P (xop10
)
1762 && GET_MODE (xop00
) == GET_MODE (xop10
)
1763 && REGNO (xop00
) == REGNO (xop10
)
1764 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1765 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1772 /* We can't assume x-x is 0 even with non-IEEE floating point,
1773 but since it is zero except in very strange circumstances, we
1774 will treat it as zero with -ffinite-math-only. */
1775 if (rtx_equal_p (trueop0
, trueop1
)
1776 && ! side_effects_p (op0
)
1777 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
1778 return CONST0_RTX (mode
);
1780 /* Change subtraction from zero into negation. (0 - x) is the
1781 same as -x when x is NaN, infinite, or finite and nonzero.
1782 But if the mode has signed zeros, and does not round towards
1783 -infinity, then 0 - 0 is 0, not -0. */
1784 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1785 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1787 /* (-1 - a) is ~a. */
1788 if (trueop0
== constm1_rtx
)
1789 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1791 /* Subtracting 0 has no effect unless the mode has signed zeros
1792 and supports rounding towards -infinity. In such a case,
1794 if (!(HONOR_SIGNED_ZEROS (mode
)
1795 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1796 && trueop1
== CONST0_RTX (mode
))
1799 /* See if this is something like X * C - X or vice versa or
1800 if the multiplication is written as a shift. If so, we can
1801 distribute and make a new multiply, shift, or maybe just
1802 have X (if C is 2 in the example above). But don't make
1803 something more expensive than we had before. */
1805 if (SCALAR_INT_MODE_P (mode
))
1807 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1808 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1809 rtx lhs
= op0
, rhs
= op1
;
1811 if (GET_CODE (lhs
) == NEG
)
1815 lhs
= XEXP (lhs
, 0);
1817 else if (GET_CODE (lhs
) == MULT
1818 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1820 coeff0l
= INTVAL (XEXP (lhs
, 1));
1821 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1822 lhs
= XEXP (lhs
, 0);
1824 else if (GET_CODE (lhs
) == ASHIFT
1825 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1826 && INTVAL (XEXP (lhs
, 1)) >= 0
1827 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1829 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1831 lhs
= XEXP (lhs
, 0);
1834 if (GET_CODE (rhs
) == NEG
)
1838 rhs
= XEXP (rhs
, 0);
1840 else if (GET_CODE (rhs
) == MULT
1841 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1843 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1844 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1845 rhs
= XEXP (rhs
, 0);
1847 else if (GET_CODE (rhs
) == ASHIFT
1848 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1849 && INTVAL (XEXP (rhs
, 1)) >= 0
1850 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1852 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1854 rhs
= XEXP (rhs
, 0);
1857 if (rtx_equal_p (lhs
, rhs
))
1859 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1861 unsigned HOST_WIDE_INT l
;
1864 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1865 coeff
= immed_double_const (l
, h
, mode
);
1867 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1868 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1873 /* (a - (-b)) -> (a + b). True even for IEEE. */
1874 if (GET_CODE (op1
) == NEG
)
1875 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1877 /* (-x - c) may be simplified as (-c - x). */
1878 if (GET_CODE (op0
) == NEG
1879 && (GET_CODE (op1
) == CONST_INT
1880 || GET_CODE (op1
) == CONST_DOUBLE
))
1882 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1884 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1887 /* Don't let a relocatable value get a negative coeff. */
1888 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1889 return simplify_gen_binary (PLUS
, mode
,
1891 neg_const_int (mode
, op1
));
1893 /* (x - (x & y)) -> (x & ~y) */
1894 if (GET_CODE (op1
) == AND
)
1896 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1898 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1899 GET_MODE (XEXP (op1
, 1)));
1900 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1902 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1904 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1905 GET_MODE (XEXP (op1
, 0)));
1906 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1910 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1911 by reversing the comparison code if valid. */
1912 if (STORE_FLAG_VALUE
== 1
1913 && trueop0
== const1_rtx
1914 && COMPARISON_P (op1
)
1915 && (reversed
= reversed_comparison (op1
, mode
)))
1918 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1919 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1920 && GET_CODE (op1
) == MULT
1921 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1925 in1
= XEXP (XEXP (op1
, 0), 0);
1926 in2
= XEXP (op1
, 1);
1927 return simplify_gen_binary (PLUS
, mode
,
1928 simplify_gen_binary (MULT
, mode
,
1933 /* Canonicalize (minus (neg A) (mult B C)) to
1934 (minus (mult (neg B) C) A). */
1935 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1936 && GET_CODE (op1
) == MULT
1937 && GET_CODE (op0
) == NEG
)
1941 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1942 in2
= XEXP (op1
, 1);
1943 return simplify_gen_binary (MINUS
, mode
,
1944 simplify_gen_binary (MULT
, mode
,
1949 /* If one of the operands is a PLUS or a MINUS, see if we can
1950 simplify this by the associative law. This will, for example,
1951 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1952 Don't use the associative law for floating point.
1953 The inaccuracy makes it nonassociative,
1954 and subtle programs can break if operations are associated. */
1956 if (INTEGRAL_MODE_P (mode
)
1957 && (plus_minus_operand_p (op0
)
1958 || plus_minus_operand_p (op1
))
1959 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1964 if (trueop1
== constm1_rtx
)
1965 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1967 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1968 x is NaN, since x * 0 is then also NaN. Nor is it valid
1969 when the mode has signed zeros, since multiplying a negative
1970 number by 0 will give -0, not 0. */
1971 if (!HONOR_NANS (mode
)
1972 && !HONOR_SIGNED_ZEROS (mode
)
1973 && trueop1
== CONST0_RTX (mode
)
1974 && ! side_effects_p (op0
))
1977 /* In IEEE floating point, x*1 is not equivalent to x for
1979 if (!HONOR_SNANS (mode
)
1980 && trueop1
== CONST1_RTX (mode
))
1983 /* Convert multiply by constant power of two into shift unless
1984 we are still generating RTL. This test is a kludge. */
1985 if (GET_CODE (trueop1
) == CONST_INT
1986 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1987 /* If the mode is larger than the host word size, and the
1988 uppermost bit is set, then this isn't a power of two due
1989 to implicit sign extension. */
1990 && (width
<= HOST_BITS_PER_WIDE_INT
1991 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1992 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1994 /* Likewise for multipliers wider than a word. */
1995 if (GET_CODE (trueop1
) == CONST_DOUBLE
1996 && (GET_MODE (trueop1
) == VOIDmode
1997 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1998 && GET_MODE (op0
) == mode
1999 && CONST_DOUBLE_LOW (trueop1
) == 0
2000 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
2001 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2002 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2004 /* x*2 is x+x and x*(-1) is -x */
2005 if (GET_CODE (trueop1
) == CONST_DOUBLE
2006 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2007 && GET_MODE (op0
) == mode
)
2010 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2012 if (REAL_VALUES_EQUAL (d
, dconst2
))
2013 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2015 if (!HONOR_SNANS (mode
)
2016 && REAL_VALUES_EQUAL (d
, dconstm1
))
2017 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2020 /* Optimize -x * -x as x * x. */
2021 if (FLOAT_MODE_P (mode
)
2022 && GET_CODE (op0
) == NEG
2023 && GET_CODE (op1
) == NEG
2024 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2025 && !side_effects_p (XEXP (op0
, 0)))
2026 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2028 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2029 if (SCALAR_FLOAT_MODE_P (mode
)
2030 && GET_CODE (op0
) == ABS
2031 && GET_CODE (op1
) == ABS
2032 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2033 && !side_effects_p (XEXP (op0
, 0)))
2034 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2036 /* Reassociate multiplication, but for floating point MULTs
2037 only when the user specifies unsafe math optimizations. */
2038 if (! FLOAT_MODE_P (mode
)
2039 || flag_unsafe_math_optimizations
)
2041 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2048 if (trueop1
== const0_rtx
)
2050 if (GET_CODE (trueop1
) == CONST_INT
2051 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2052 == GET_MODE_MASK (mode
)))
2054 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2056 /* A | (~A) -> -1 */
2057 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2058 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2059 && ! side_effects_p (op0
)
2060 && SCALAR_INT_MODE_P (mode
))
2063 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2064 if (GET_CODE (op1
) == CONST_INT
2065 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2066 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
2069 /* Canonicalize (X & C1) | C2. */
2070 if (GET_CODE (op0
) == AND
2071 && GET_CODE (trueop1
) == CONST_INT
2072 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
)
2074 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2075 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2076 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2078 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2080 && !side_effects_p (XEXP (op0
, 0)))
2083 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2084 if (((c1
|c2
) & mask
) == mask
)
2085 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2087 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2088 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2090 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2091 gen_int_mode (c1
& ~c2
, mode
));
2092 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
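      /* For illustration: in a 32-bit mode, (ior (ashift A (const_int 24))
         (lshiftrt A (const_int 8))) satisfies 24 + 8 == 32 and therefore
         becomes (rotate A (const_int 24)), i.e. a left-rotate by 24.  */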
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }
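      /* The transformation above relies on the identity
         ((A + C) >> N) | B  ==  (A + C + (B << N)) >> N, which holds when
         B << N does not overflow and has no bits in common with anything
         that can be nonzero in A + C, so the IOR commutes with the
         arithmetic shift.  */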
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
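      /* Example: if op0 can have only the bits 0xf0 set and op1 only 0x0f,
         then op0 ^ op1 equals op0 | op1 (0xf0 ^ 0x0f == 0xf0 | 0x0f == 0xff),
         since XOR and IOR differ only where both inputs can be 1.  */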
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
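      /* The rewrite above uses the identity (A & B) ^ B == ~A & B: where B
         is 0 both sides are 0, and where B is 1 the left side is A ^ 1,
         i.e. ~A.  For instance, A = 0b1010, B = 0b1100 gives
         (A & B) ^ B == 0b1000 ^ 0b1100 == 0b0100 == ~A & B.  */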
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
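      /* For illustration: with X in QImode and C = 0x7c, every nonzero bit
         of C lies inside QImode's mask 0xff, so (and (sign_extend:SI X)
         (const_int 0x7c)) becomes (zero_extend:SI (and:QI X (const_int
         0x7c))); whatever the extension put in the upper bits is masked
         away anyway.  */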
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && GET_CODE (trueop1) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT)
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0), op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
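      /* The absorption rules above are plain boolean algebra, e.g.
         (A ^ B) & A == A & ~B (check A = 0b1100, B = 0b1010:
         0b0110 & 0b1100 == 0b0100 == 0b1100 & 0b0101) and
         (A | B) & A == A.  */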
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                      == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
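      /* Worked instance of the masking rule above, with illustrative
         constants: for M = 0xff (so M + 1 is a power of two) and N = 0x1ff,
         (N & M) == M, hence ((A & 0x1ff) + B) & 0xff == (A + B) & 0xff;
         clearing bits of A above the mask cannot change the low eight bits
         of the sum, because carries only propagate upward.  */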
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
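                  /* For example, with -freciprocal-math, x / 4.0 becomes
                     x * 0.25; the reciprocal is computed at compile time by
                     REAL_ARITHMETIC, so the result is exact only when 1/d is
                     representable, as it is for powers of two.  */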
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
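      /* E.g. x % 8 == x & 7 for the unsigned modulus: when the divisor is
         2**k the remainder is exactly the low k bits, so the UMOD collapses
         to a bitwise AND with 2**k - 1.  */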
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT)
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && GET_CODE (trueop1) == CONST_INT
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_BITSIZE (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
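      /* For illustration, on a target where CLZ_DEFINED_VALUE_AT_ZERO
         yields 32 for SImode: (clz X) is 32 exactly when X == 0 and at most
         31 otherwise, so bit 5 of the result -- i.e. (lshiftrt (clz X) 5) --
         is 1 iff X == 0, which is (eq X 0) when STORE_FLAG_VALUE is 1.  */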
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
      /* ??? There are simplifications that can be done.  */
2664 if (!VECTOR_MODE_P (mode
))
2666 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2667 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2668 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2669 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2670 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2672 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2673 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
      /* Extract a scalar element from a nested VEC_SELECT expression
         (with optional nested VEC_CONCAT expression).  Some targets
         (i386) extract a scalar element from a vector using a chain of
         nested VEC_SELECT expressions.  When the input operand is a
         memory operand, this operation can be simplified to a simple
         scalar load from an offset memory address.  */
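      /* As an illustration: selecting element 2 of (vec_select:V4SF X
         (parallel [4 5 6 7])) picks element 6 of X itself, so when X is a
         memory operand the whole expression reduces to a scalar load at
         the byte offset of element 6.  */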
2682 if (GET_CODE (trueop0
) == VEC_SELECT
)
2684 rtx op0
= XEXP (trueop0
, 0);
2685 rtx op1
= XEXP (trueop0
, 1);
2687 enum machine_mode opmode
= GET_MODE (op0
);
2688 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
2689 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
2691 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
2697 gcc_assert (GET_CODE (op1
) == PARALLEL
);
2698 gcc_assert (i
< n_elts
);
2700 /* Select element, pointed by nested selector. */
2701 elem
= INTVAL (XVECEXP (op1
, 0, i
));
2703 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2704 if (GET_CODE (op0
) == VEC_CONCAT
)
2706 rtx op00
= XEXP (op0
, 0);
2707 rtx op01
= XEXP (op0
, 1);
2709 enum machine_mode mode00
, mode01
;
2710 int n_elts00
, n_elts01
;
2712 mode00
= GET_MODE (op00
);
2713 mode01
= GET_MODE (op01
);
2715 /* Find out number of elements of each operand. */
2716 if (VECTOR_MODE_P (mode00
))
2718 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
2719 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
2724 if (VECTOR_MODE_P (mode01
))
2726 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
2727 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
2732 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
2734 /* Select correct operand of VEC_CONCAT
2735 and adjust selector. */
2736 if (elem
< n_elts01
)
2747 vec
= rtvec_alloc (1);
2748 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
2750 tmp
= gen_rtx_fmt_ee (code
, mode
,
2751 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
2757 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2758 gcc_assert (GET_MODE_INNER (mode
)
2759 == GET_MODE_INNER (GET_MODE (trueop0
)));
2760 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2762 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2764 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2765 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2766 rtvec v
= rtvec_alloc (n_elts
);
2769 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2770 for (i
= 0; i
< n_elts
; i
++)
2772 rtx x
= XVECEXP (trueop1
, 0, i
);
2774 gcc_assert (GET_CODE (x
) == CONST_INT
);
2775 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2779 return gen_rtx_CONST_VECTOR (mode
, v
);
2783 if (XVECLEN (trueop1
, 0) == 1
2784 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2785 && GET_CODE (trueop0
) == VEC_CONCAT
)
2788 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2790 /* Try to find the element in the VEC_CONCAT. */
2791 while (GET_MODE (vec
) != mode
2792 && GET_CODE (vec
) == VEC_CONCAT
)
2794 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2795 if (offset
< vec_size
)
2796 vec
= XEXP (vec
, 0);
2800 vec
= XEXP (vec
, 1);
2802 vec
= avoid_constant_pool_reference (vec
);
2805 if (GET_MODE (vec
) == mode
)
2812 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2813 ? GET_MODE (trueop0
)
2814 : GET_MODE_INNER (mode
));
2815 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2816 ? GET_MODE (trueop1
)
2817 : GET_MODE_INNER (mode
));
2819 gcc_assert (VECTOR_MODE_P (mode
));
2820 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2821 == GET_MODE_SIZE (mode
));
2823 if (VECTOR_MODE_P (op0_mode
))
2824 gcc_assert (GET_MODE_INNER (mode
)
2825 == GET_MODE_INNER (op0_mode
));
2827 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2829 if (VECTOR_MODE_P (op1_mode
))
2830 gcc_assert (GET_MODE_INNER (mode
)
2831 == GET_MODE_INNER (op1_mode
));
2833 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2835 if ((GET_CODE (trueop0
) == CONST_VECTOR
2836 || GET_CODE (trueop0
) == CONST_INT
2837 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2838 && (GET_CODE (trueop1
) == CONST_VECTOR
2839 || GET_CODE (trueop1
) == CONST_INT
2840 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2842 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2843 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2844 rtvec v
= rtvec_alloc (n_elts
);
2846 unsigned in_n_elts
= 1;
2848 if (VECTOR_MODE_P (op0_mode
))
2849 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2850 for (i
= 0; i
< n_elts
; i
++)
2854 if (!VECTOR_MODE_P (op0_mode
))
2855 RTVEC_ELT (v
, i
) = trueop0
;
2857 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2861 if (!VECTOR_MODE_P (op1_mode
))
2862 RTVEC_ELT (v
, i
) = trueop1
;
2864 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2869 return gen_rtx_CONST_VECTOR (mode
, v
);
2882 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2885 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2887 unsigned int width
= GET_MODE_BITSIZE (mode
);
2889 if (VECTOR_MODE_P (mode
)
2890 && code
!= VEC_CONCAT
2891 && GET_CODE (op0
) == CONST_VECTOR
2892 && GET_CODE (op1
) == CONST_VECTOR
)
2894 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2895 enum machine_mode op0mode
= GET_MODE (op0
);
2896 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2897 enum machine_mode op1mode
= GET_MODE (op1
);
2898 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2899 rtvec v
= rtvec_alloc (n_elts
);
2902 gcc_assert (op0_n_elts
== n_elts
);
2903 gcc_assert (op1_n_elts
== n_elts
);
2904 for (i
= 0; i
< n_elts
; i
++)
2906 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2907 CONST_VECTOR_ELT (op0
, i
),
2908 CONST_VECTOR_ELT (op1
, i
));
2911 RTVEC_ELT (v
, i
) = x
;
2914 return gen_rtx_CONST_VECTOR (mode
, v
);
2917 if (VECTOR_MODE_P (mode
)
2918 && code
== VEC_CONCAT
2919 && (CONST_INT_P (op0
)
2920 || GET_CODE (op0
) == CONST_DOUBLE
2921 || GET_CODE (op0
) == CONST_FIXED
)
2922 && (CONST_INT_P (op1
)
2923 || GET_CODE (op1
) == CONST_DOUBLE
2924 || GET_CODE (op1
) == CONST_FIXED
))
2926 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2927 rtvec v
= rtvec_alloc (n_elts
);
2929 gcc_assert (n_elts
>= 2);
2932 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2933 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2935 RTVEC_ELT (v
, 0) = op0
;
2936 RTVEC_ELT (v
, 1) = op1
;
2940 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2941 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2944 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2945 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2946 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2948 for (i
= 0; i
< op0_n_elts
; ++i
)
2949 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2950 for (i
= 0; i
< op1_n_elts
; ++i
)
2951 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2954 return gen_rtx_CONST_VECTOR (mode
, v
);
2957 if (SCALAR_FLOAT_MODE_P (mode
)
2958 && GET_CODE (op0
) == CONST_DOUBLE
2959 && GET_CODE (op1
) == CONST_DOUBLE
2960 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2971 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2973 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2975 for (i
= 0; i
< 4; i
++)
2992 real_from_target (&r
, tmp0
, mode
);
2993 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2997 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3000 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3001 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3002 real_convert (&f0
, mode
, &f0
);
3003 real_convert (&f1
, mode
, &f1
);
3005 if (HONOR_SNANS (mode
)
3006 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3010 && REAL_VALUES_EQUAL (f1
, dconst0
)
3011 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3014 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3015 && flag_trapping_math
3016 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3018 int s0
= REAL_VALUE_NEGATIVE (f0
);
3019 int s1
= REAL_VALUE_NEGATIVE (f1
);
3024 /* Inf + -Inf = NaN plus exception. */
3029 /* Inf - Inf = NaN plus exception. */
3034 /* Inf / Inf = NaN plus exception. */
3041 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3042 && flag_trapping_math
3043 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3044 || (REAL_VALUE_ISINF (f1
)
3045 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3046 /* Inf * 0 = NaN plus exception. */
3049 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3051 real_convert (&result
, mode
, &value
);
3053 /* Don't constant fold this floating point operation if
3054 the result has overflowed and flag_trapping_math. */
3056 if (flag_trapping_math
3057 && MODE_HAS_INFINITIES (mode
)
3058 && REAL_VALUE_ISINF (result
)
3059 && !REAL_VALUE_ISINF (f0
)
3060 && !REAL_VALUE_ISINF (f1
))
3061 /* Overflow plus exception. */
3064 /* Don't constant fold this floating point operation if the
3065 result may dependent upon the run-time rounding mode and
3066 flag_rounding_math is set, or if GCC's software emulation
3067 is unable to accurately represent the result. */
3069 if ((flag_rounding_math
3070 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
3071 && !flag_unsafe_math_optimizations
))
3072 && (inexact
|| !real_identical (&result
, &value
)))
3075 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3079 /* We can fold some multi-word operations. */
3080 if (GET_MODE_CLASS (mode
) == MODE_INT
3081 && width
== HOST_BITS_PER_WIDE_INT
* 2
3082 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
3083 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
3085 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
3086 HOST_WIDE_INT h1
, h2
, hv
, ht
;
3088 if (GET_CODE (op0
) == CONST_DOUBLE
)
3089 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
3091 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
3093 if (GET_CODE (op1
) == CONST_DOUBLE
)
3094 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
3096 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
3101 /* A - B == A + (-B). */
3102 neg_double (l2
, h2
, &lv
, &hv
);
3105 /* Fall through.... */
3108 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3112 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3116 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3117 &lv
, &hv
, <
, &ht
))
3122 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3123 <
, &ht
, &lv
, &hv
))
3128 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3129 &lv
, &hv
, <
, &ht
))
3134 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3135 <
, &ht
, &lv
, &hv
))
3140 lv
= l1
& l2
, hv
= h1
& h2
;
3144 lv
= l1
| l2
, hv
= h1
| h2
;
3148 lv
= l1
^ l2
, hv
= h1
^ h2
;
3154 && ((unsigned HOST_WIDE_INT
) l1
3155 < (unsigned HOST_WIDE_INT
) l2
)))
3164 && ((unsigned HOST_WIDE_INT
) l1
3165 > (unsigned HOST_WIDE_INT
) l2
)))
3172 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
3174 && ((unsigned HOST_WIDE_INT
) l1
3175 < (unsigned HOST_WIDE_INT
) l2
)))
3182 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
3184 && ((unsigned HOST_WIDE_INT
) l1
3185 > (unsigned HOST_WIDE_INT
) l2
)))
3191 case LSHIFTRT
: case ASHIFTRT
:
3193 case ROTATE
: case ROTATERT
:
3194 if (SHIFT_COUNT_TRUNCATED
)
3195 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
3197 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
3200 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3201 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
3203 else if (code
== ASHIFT
)
3204 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
3205 else if (code
== ROTATE
)
3206 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3207 else /* code == ROTATERT */
3208 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3215 return immed_double_const (lv
, hv
, mode
);
3218 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3219 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3221 /* Get the integer argument values in two forms:
3222 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3224 arg0
= INTVAL (op0
);
3225 arg1
= INTVAL (op1
);
3227 if (width
< HOST_BITS_PER_WIDE_INT
)
3229 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3230 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3233 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3234 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3237 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3238 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3246 /* Compute the value of the arithmetic. */
3251 val
= arg0s
+ arg1s
;
3255 val
= arg0s
- arg1s
;
3259 val
= arg0s
* arg1s
;
3264 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3267 val
= arg0s
/ arg1s
;
3272 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3275 val
= arg0s
% arg1s
;
3280 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3283 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3288 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3291 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
3316 if (SHIFT_COUNT_TRUNCATED
)
3317 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3318 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3321 val
= (code
== ASHIFT
3322 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3323 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3325 /* Sign-extend the result for arithmetic right shifts. */
3326 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3327 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3335 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3336 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3344 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3345 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3349 /* Do nothing here. */
3353 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3357 val
= ((unsigned HOST_WIDE_INT
) arg0
3358 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3362 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3366 val
= ((unsigned HOST_WIDE_INT
) arg0
3367 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3380 /* ??? There are simplifications that can be done. */
3387 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
3402 struct simplify_plus_minus_op_data
3409 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
3413 result
= (commutative_operand_precedence (y
)
3414 - commutative_operand_precedence (x
));
3418 /* Group together equal REGs to do more simplification. */
3419 if (REG_P (x
) && REG_P (y
))
3420 return REGNO (x
) > REGNO (y
);
3426 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3429 struct simplify_plus_minus_op_data ops
[8];
3431 int n_ops
= 2, input_ops
= 2;
3432 int changed
, n_constants
= 0, canonicalized
= 0;
3435 memset (ops
, 0, sizeof ops
);
3437 /* Set up the two operands and then expand them until nothing has been
3438 changed. If we run out of room in our array, give up; this should
3439 almost never happen. */
3444 ops
[1].neg
= (code
== MINUS
);
3450 for (i
= 0; i
< n_ops
; i
++)
3452 rtx this_op
= ops
[i
].op
;
3453 int this_neg
= ops
[i
].neg
;
3454 enum rtx_code this_code
= GET_CODE (this_op
);
3463 ops
[n_ops
].op
= XEXP (this_op
, 1);
3464 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3467 ops
[i
].op
= XEXP (this_op
, 0);
3470 canonicalized
|= this_neg
;
3474 ops
[i
].op
= XEXP (this_op
, 0);
3475 ops
[i
].neg
= ! this_neg
;
3482 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3483 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3484 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3486 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3487 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3488 ops
[n_ops
].neg
= this_neg
;
3496 /* ~a -> (-a - 1) */
3499 ops
[n_ops
].op
= constm1_rtx
;
3500 ops
[n_ops
++].neg
= this_neg
;
3501 ops
[i
].op
= XEXP (this_op
, 0);
3502 ops
[i
].neg
= !this_neg
;
3512 ops
[i
].op
= neg_const_int (mode
, this_op
);
3526 if (n_constants
> 1)
3529 gcc_assert (n_ops
>= 2);
3531 /* If we only have two operands, we can avoid the loops. */
3534 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3537 /* Get the two operands. Be careful with the order, especially for
3538 the cases where code == MINUS. */
3539 if (ops
[0].neg
&& ops
[1].neg
)
3541 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3544 else if (ops
[0].neg
)
3555 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3558 /* Now simplify each pair of operands until nothing changes. */
3561 /* Insertion sort is good enough for an eight-element array. */
3562 for (i
= 1; i
< n_ops
; i
++)
3564 struct simplify_plus_minus_op_data save
;
3566 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
3572 ops
[j
+ 1] = ops
[j
];
3573 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
3577 /* This is only useful the first time through. */
3582 for (i
= n_ops
- 1; i
> 0; i
--)
3583 for (j
= i
- 1; j
>= 0; j
--)
3585 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3586 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3588 if (lhs
!= 0 && rhs
!= 0)
3590 enum rtx_code ncode
= PLUS
;
3596 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3598 else if (swap_commutative_operands_p (lhs
, rhs
))
3599 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3601 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3602 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3604 rtx tem_lhs
, tem_rhs
;
3606 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3607 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3608 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3610 if (tem
&& !CONSTANT_P (tem
))
3611 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3614 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3616 /* Reject "simplifications" that just wrap the two
3617 arguments in a CONST. Failure to do so can result
3618 in infinite recursion with simplify_binary_operation
3619 when it calls us to simplify CONST operations. */
3621 && ! (GET_CODE (tem
) == CONST
3622 && GET_CODE (XEXP (tem
, 0)) == ncode
3623 && XEXP (XEXP (tem
, 0), 0) == lhs
3624 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3627 if (GET_CODE (tem
) == NEG
)
3628 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3629 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3630 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3634 ops
[j
].op
= NULL_RTX
;
3640 /* Pack all the operands to the lower-numbered entries. */
3641 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3651 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3653 && GET_CODE (ops
[1].op
) == CONST_INT
3654 && CONSTANT_P (ops
[0].op
)
3656 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3658 /* We suppressed creation of trivial CONST expressions in the
3659 combination loop to avoid recursion. Create one manually now.
3660 The combination loop should have ensured that there is exactly
3661 one CONST_INT, and the sort will have ensured that it is last
3662 in the array and that any other constant will be next-to-last. */
3665 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3666 && CONSTANT_P (ops
[n_ops
- 2].op
))
3668 rtx value
= ops
[n_ops
- 1].op
;
3669 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3670 value
= neg_const_int (mode
, value
);
3671 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3675 /* Put a non-negated operand first, if possible. */
3677 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3680 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3689 /* Now make the result by performing the requested operations. */
3691 for (i
= 1; i
< n_ops
; i
++)
3692 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3693 mode
, result
, ops
[i
].op
);
3698 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3700 plus_minus_operand_p (const_rtx x
)
3702 return GET_CODE (x
) == PLUS
3703 || GET_CODE (x
) == MINUS
3704 || (GET_CODE (x
) == CONST
3705 && GET_CODE (XEXP (x
, 0)) == PLUS
3706 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
3707 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
3719 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3720 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3722 rtx tem
, trueop0
, trueop1
;
3724 if (cmp_mode
== VOIDmode
)
3725 cmp_mode
= GET_MODE (op0
);
3726 if (cmp_mode
== VOIDmode
)
3727 cmp_mode
= GET_MODE (op1
);
3729 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3732 if (SCALAR_FLOAT_MODE_P (mode
))
3734 if (tem
== const0_rtx
)
3735 return CONST0_RTX (mode
);
3736 #ifdef FLOAT_STORE_FLAG_VALUE
3738 REAL_VALUE_TYPE val
;
3739 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3740 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3746 if (VECTOR_MODE_P (mode
))
3748 if (tem
== const0_rtx
)
3749 return CONST0_RTX (mode
);
3750 #ifdef VECTOR_STORE_FLAG_VALUE
3755 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3756 if (val
== NULL_RTX
)
3758 if (val
== const1_rtx
)
3759 return CONST1_RTX (mode
);
3761 units
= GET_MODE_NUNITS (mode
);
3762 v
= rtvec_alloc (units
);
3763 for (i
= 0; i
< units
; i
++)
3764 RTVEC_ELT (v
, i
) = val
;
3765 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3775 /* For the following tests, ensure const0_rtx is op1. */
3776 if (swap_commutative_operands_p (op0
, op1
)
3777 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3778 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3780 /* If op0 is a compare, extract the comparison arguments from it. */
3781 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3782 return simplify_relational_operation (code
, mode
, VOIDmode
,
3783 XEXP (op0
, 0), XEXP (op0
, 1));
3785 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3789 trueop0
= avoid_constant_pool_reference (op0
);
3790 trueop1
= avoid_constant_pool_reference (op1
);
3791 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
3802 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3803 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3805 enum rtx_code op0code
= GET_CODE (op0
);
3807 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
3809 /* If op0 is a comparison, extract the comparison arguments
3813 if (GET_MODE (op0
) == mode
)
3814 return simplify_rtx (op0
);
3816 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3817 XEXP (op0
, 0), XEXP (op0
, 1));
3819 else if (code
== EQ
)
3821 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3822 if (new_code
!= UNKNOWN
)
3823 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3824 XEXP (op0
, 0), XEXP (op0
, 1));
3828 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3829 if ((code
== LTU
|| code
== GEU
)
3830 && GET_CODE (op0
) == PLUS
3831 && rtx_equal_p (op1
, XEXP (op0
, 1))
3832 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3833 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
3834 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
, XEXP (op0
, 0));
3836 if (op1
== const0_rtx
)
3838 /* Canonicalize (GTU x 0) as (NE x 0). */
3840 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
3841 /* Canonicalize (LEU x 0) as (EQ x 0). */
3843 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
3845 else if (op1
== const1_rtx
)
3850 /* Canonicalize (GE x 1) as (GT x 0). */
3851 return simplify_gen_relational (GT
, mode
, cmp_mode
,
3854 /* Canonicalize (GEU x 1) as (NE x 0). */
3855 return simplify_gen_relational (NE
, mode
, cmp_mode
,
3858 /* Canonicalize (LT x 1) as (LE x 0). */
3859 return simplify_gen_relational (LE
, mode
, cmp_mode
,
3862 /* Canonicalize (LTU x 1) as (EQ x 0). */
3863 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
3869 else if (op1
== constm1_rtx
)
3871 /* Canonicalize (LE x -1) as (LT x 0). */
3873 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
3874 /* Canonicalize (GT x -1) as (GE x 0). */
3876 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
3879 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3880 if ((code
== EQ
|| code
== NE
)
3881 && (op0code
== PLUS
|| op0code
== MINUS
)
3883 && CONSTANT_P (XEXP (op0
, 1))
3884 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3886 rtx x
= XEXP (op0
, 0);
3887 rtx c
= XEXP (op0
, 1);
3889 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3891 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3894 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3895 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3897 && op1
== const0_rtx
3898 && GET_MODE_CLASS (mode
) == MODE_INT
3899 && cmp_mode
!= VOIDmode
3900 /* ??? Work-around BImode bugs in the ia64 backend. */
3902 && cmp_mode
!= BImode
3903 && nonzero_bits (op0
, cmp_mode
) == 1
3904 && STORE_FLAG_VALUE
== 1)
3905 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3906 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3907 : lowpart_subreg (mode
, op0
, cmp_mode
);
3909 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3910 if ((code
== EQ
|| code
== NE
)
3911 && op1
== const0_rtx
3913 return simplify_gen_relational (code
, mode
, cmp_mode
,
3914 XEXP (op0
, 0), XEXP (op0
, 1));
3916 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3917 if ((code
== EQ
|| code
== NE
)
3919 && rtx_equal_p (XEXP (op0
, 0), op1
)
3920 && !side_effects_p (XEXP (op0
, 0)))
3921 return simplify_gen_relational (code
, mode
, cmp_mode
,
3922 XEXP (op0
, 1), const0_rtx
);
3924 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3925 if ((code
== EQ
|| code
== NE
)
3927 && rtx_equal_p (XEXP (op0
, 1), op1
)
3928 && !side_effects_p (XEXP (op0
, 1)))
3929 return simplify_gen_relational (code
, mode
, cmp_mode
,
3930 XEXP (op0
, 0), const0_rtx
);
3932 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3933 if ((code
== EQ
|| code
== NE
)
3935 && (GET_CODE (op1
) == CONST_INT
3936 || GET_CODE (op1
) == CONST_DOUBLE
)
3937 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3938 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3939 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3940 simplify_gen_binary (XOR
, cmp_mode
,
3941 XEXP (op0
, 1), op1
));
3943 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
3949 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3950 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
3951 XEXP (op0
, 0), const0_rtx
);
3956 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3957 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
3958 XEXP (op0
, 0), const0_rtx
);
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
3984 comparison_result (enum rtx_code code
, int known_results
)
3990 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
3993 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
3997 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4000 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4004 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4007 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4010 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4012 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4015 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4017 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4020 return const_true_rtx
;
4028 /* Check if the given comparison (done in the given MODE) is actually a
4029 tautology or a contradiction.
4030 If no simplification is possible, this function returns zero.
4031 Otherwise, it returns either const_true_rtx or const0_rtx. */
4034 simplify_const_relational_operation (enum rtx_code code
,
4035 enum machine_mode mode
,
4042 gcc_assert (mode
!= VOIDmode
4043 || (GET_MODE (op0
) == VOIDmode
4044 && GET_MODE (op1
) == VOIDmode
));
4046 /* If op0 is a compare, extract the comparison arguments from it. */
4047 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4049 op1
= XEXP (op0
, 1);
4050 op0
= XEXP (op0
, 0);
4052 if (GET_MODE (op0
) != VOIDmode
)
4053 mode
= GET_MODE (op0
);
4054 else if (GET_MODE (op1
) != VOIDmode
)
4055 mode
= GET_MODE (op1
);
4060 /* We can't simplify MODE_CC values since we don't know what the
4061 actual comparison is. */
4062 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4065 /* Make sure the constant is second. */
4066 if (swap_commutative_operands_p (op0
, op1
))
4068 tem
= op0
, op0
= op1
, op1
= tem
;
4069 code
= swap_condition (code
);
4072 trueop0
= avoid_constant_pool_reference (op0
);
4073 trueop1
= avoid_constant_pool_reference (op1
);
4075 /* For integer comparisons of A and B maybe we can simplify A - B and can
4076 then simplify a comparison of that with zero. If A and B are both either
4077 a register or a CONST_INT, this can't help; testing for these cases will
4078 prevent infinite recursion here and speed things up.
4080 We can only do this for EQ and NE comparisons as otherwise we may
4081 lose or introduce overflow which we cannot disregard as undefined as
4082 we do not know the signedness of the operation on either the left or
4083 the right hand side of the comparison. */
4085 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4086 && (code
== EQ
|| code
== NE
)
4087 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
4088 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
4089 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4090 /* We cannot do this if tem is a nonzero address. */
4091 && ! nonzero_address_p (tem
))
4092 return simplify_const_relational_operation (signed_condition (code
),
4093 mode
, tem
, const0_rtx
);
4095 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4096 return const_true_rtx
;
4098 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4101 /* For modes without NaNs, if the two operands are equal, we know the
4102 result except if they have side-effects. Even with NaNs we know
4103 the result of unordered comparisons and, if signaling NaNs are
4104 irrelevant, also the result of LT/GT/LTGT. */
4105 if ((! HONOR_NANS (GET_MODE (trueop0
))
4106 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4107 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4108 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4109 && rtx_equal_p (trueop0
, trueop1
)
4110 && ! side_effects_p (trueop0
))
4111 return comparison_result (code
, CMP_EQ
);
4113 /* If the operands are floating-point constants, see if we can fold
4115 if (GET_CODE (trueop0
) == CONST_DOUBLE
4116 && GET_CODE (trueop1
) == CONST_DOUBLE
4117 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4119 REAL_VALUE_TYPE d0
, d1
;
4121 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4122 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4124 /* Comparisons are unordered iff at least one of the values is NaN. */
4125 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4135 return const_true_rtx
;
4148 return comparison_result (code
,
4149 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4150 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4153 /* Otherwise, see if the operands are both integers. */
4154 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4155 && (GET_CODE (trueop0
) == CONST_DOUBLE
4156 || GET_CODE (trueop0
) == CONST_INT
)
4157 && (GET_CODE (trueop1
) == CONST_DOUBLE
4158 || GET_CODE (trueop1
) == CONST_INT
))
4160 int width
= GET_MODE_BITSIZE (mode
);
4161 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4162 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4164 /* Get the two words comprising each integer constant. */
4165 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
4167 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4168 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4172 l0u
= l0s
= INTVAL (trueop0
);
4173 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4176 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4178 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4179 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4183 l1u
= l1s
= INTVAL (trueop1
);
4184 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4187 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4188 we have to sign or zero-extend the values. */
4189 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4191 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4192 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4194 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4195 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4197 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4198 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4200 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4201 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4203 if (h0u
== h1u
&& l0u
== l1u
)
4204 return comparison_result (code
, CMP_EQ
);
4208 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4209 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4210 return comparison_result (code
, cr
);
4214 /* Optimize comparisons with upper and lower bounds. */
4215 if (SCALAR_INT_MODE_P (mode
)
4216 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
4217 && GET_CODE (trueop1
) == CONST_INT
)
4220 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4221 HOST_WIDE_INT val
= INTVAL (trueop1
);
4222 HOST_WIDE_INT mmin
, mmax
;
4232 /* Get a reduced range if the sign bit is zero. */
4233 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4240 rtx mmin_rtx
, mmax_rtx
;
4241 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4243 mmin
= INTVAL (mmin_rtx
);
4244 mmax
= INTVAL (mmax_rtx
);
4247 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4249 mmin
>>= (sign_copies
- 1);
4250 mmax
>>= (sign_copies
- 1);
4256 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4258 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4259 return const_true_rtx
;
4260 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4265 return const_true_rtx
;
4270 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4272 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4273 return const_true_rtx
;
4274 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4279 return const_true_rtx
;
4285 /* x == y is always false for y out of range. */
4286 if (val
< mmin
|| val
> mmax
)
4290 /* x > y is always false for y >= mmax, always true for y < mmin. */
4292 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4294 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4295 return const_true_rtx
;
4301 return const_true_rtx
;
4304 /* x < y is always false for y <= mmin, always true for y > mmax. */
4306 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4308 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4309 return const_true_rtx
;
4315 return const_true_rtx
;
4319 /* x != y is always true for y out of range. */
4320 if (val
< mmin
|| val
> mmax
)
4321 return const_true_rtx
;
4329 /* Optimize integer comparisons with zero. */
4330 if (trueop1
== const0_rtx
)
4332 /* Some addresses are known to be nonzero. We don't know
4333 their sign, but equality comparisons are known. */
4334 if (nonzero_address_p (trueop0
))
4336 if (code
== EQ
|| code
== LEU
)
4338 if (code
== NE
|| code
== GTU
)
4339 return const_true_rtx
;
4342 /* See if the first operand is an IOR with a constant. If so, we
4343 may be able to determine the result of this comparison. */
4344 if (GET_CODE (op0
) == IOR
)
4346 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4347 if (GET_CODE (inner_const
) == CONST_INT
&& inner_const
!= const0_rtx
)
4349 int sign_bitnum
= GET_MODE_BITSIZE (mode
) - 1;
4350 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4351 && (INTVAL (inner_const
)
4352 & ((HOST_WIDE_INT
) 1 << sign_bitnum
)));
4361 return const_true_rtx
;
4365 return const_true_rtx
;
4379 /* Optimize comparison of ABS with zero. */
4380 if (trueop1
== CONST0_RTX (mode
)
4381 && (GET_CODE (trueop0
) == ABS
4382 || (GET_CODE (trueop0
) == FLOAT_EXTEND
4383 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
4388 /* Optimize abs(x) < 0.0. */
4389 if (!HONOR_SNANS (mode
)
4390 && (!INTEGRAL_MODE_P (mode
)
4391 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4393 if (INTEGRAL_MODE_P (mode
)
4394 && (issue_strict_overflow_warning
4395 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4396 warning (OPT_Wstrict_overflow
,
4397 ("assuming signed overflow does not occur when "
4398 "assuming abs (x) < 0 is false"));
4404 /* Optimize abs(x) >= 0.0. */
4405 if (!HONOR_NANS (mode
)
4406 && (!INTEGRAL_MODE_P (mode
)
4407 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4409 if (INTEGRAL_MODE_P (mode
)
4410 && (issue_strict_overflow_warning
4411 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4412 warning (OPT_Wstrict_overflow
,
4413 ("assuming signed overflow does not occur when "
4414 "assuming abs (x) >= 0 is true"));
4415 return const_true_rtx
;
4420 /* Optimize ! (abs(x) < 0.0). */
4421 return const_true_rtx
;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
4436 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
4437 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
4440 unsigned int width
= GET_MODE_BITSIZE (mode
);
4442 /* VOIDmode means "infinite" precision. */
4444 width
= HOST_BITS_PER_WIDE_INT
;
4450 if (GET_CODE (op0
) == CONST_INT
4451 && GET_CODE (op1
) == CONST_INT
4452 && GET_CODE (op2
) == CONST_INT
4453 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
4454 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
4456 /* Extracting a bit-field from a constant */
4457 HOST_WIDE_INT val
= INTVAL (op0
);
4459 if (BITS_BIG_ENDIAN
)
4460 val
>>= (GET_MODE_BITSIZE (op0_mode
)
4461 - INTVAL (op2
) - INTVAL (op1
));
4463 val
>>= INTVAL (op2
);
4465 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4467 /* First zero-extend. */
4468 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4469 /* If desired, propagate sign bit. */
4470 if (code
== SIGN_EXTRACT
4471 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4472 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4475 /* Clear the bits that don't belong in our mode,
4476 unless they and our sign bit are all one.
4477 So we get either a reasonable negative value or a reasonable
4478 unsigned value for this mode. */
4479 if (width
< HOST_BITS_PER_WIDE_INT
4480 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4481 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4482 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4484 return gen_int_mode (val
, mode
);
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for cases where op1 and op2 are the constants
	     STORE_FLAG_VALUE and zero; the result is then just the
	     condition itself, possibly reversed.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;

	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
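      /* For example, with STORE_FLAG_VALUE == 1 the code above turns
	   (if_then_else (lt (reg:SI a) (reg:SI b)) (const_int 1) (const_int 0))
	 into the bare comparison (lt (reg:SI a) (reg:SI b)), and the
	 reversed constant order into the reversed comparison (ge ...).  */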
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
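/* Illustrative sketch (not part of GCC): the VEC_MERGE selection above,
   reduced to plain C arrays.  Bit I of MASK set picks element I of FROM0,
   clear picks element I of FROM1, mirroring the CONST_VECTOR fold.  */
#if 0
#include <stdio.h>

static void
vec_merge_example (const int *from0, const int *from1, int mask,
		   int n_elts, int *dest)
{
  int i;
  for (i = 0; i < n_elts; i++)
    dest[i] = (mask & (1 << i)) ? from0[i] : from1[i];
}

int
main (void)
{
  int a[4] = { 10, 11, 12, 13 };
  int b[4] = { 20, 21, 22, 23 };
  int r[4];
  int i;

  vec_merge_example (a, b, 0x5, 4, r);
  for (i = 0; i < 4; i++)
    printf ("%d ", r[i]);	/* Prints: 10 21 12 23  */
  printf ("\n");
  return 0;
}
#endif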
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
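      /* Worked example for the index computation above: V4HImode
	 (num_elem = 4, elem_bitsize = 16) on a big-endian target with
	 UNITS_PER_WORD = 4.  For elem = 0: byte = 0, ibyte = 6, so
	 word_byte = subword_byte = 6 and bytele = 6 % 4 + (6 / 4) * 4 = 6;
	 the first vector element lands at the high-address end of the
	 little-endian VALUE array, as target memory order requires.  */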
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
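/* Illustrative sketch (not part of GCC): the unpack/select/repack scheme
   used by simplify_immed_subreg above, reduced to host integers.  A 32-bit
   constant is spread into a little-endian byte array, a byte offset selects
   a window, and a 16-bit value is reassembled from it.  The constant and
   offset here are only an example.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint16_t
subreg_of_const_example (uint32_t op, unsigned byte_offset)
{
  unsigned char value[4];
  unsigned i;
  uint16_t result = 0;

  /* Unpack OP, least-significant byte first.  */
  for (i = 0; i < 4; i++)
    value[i] = (op >> (8 * i)) & 0xff;

  /* Re-pack two bytes starting at BYTE_OFFSET.  */
  for (i = 0; i < 2; i++)
    result |= (uint16_t) value[byte_offset + i] << (8 * i);
  return result;
}

int
main (void)
{
  /* (subreg:HI (const_int 0x12345678) 2) on a little-endian target.  */
  printf ("0x%x\n", subreg_of_const_example (0x12345678, 2));	/* 0x1234  */
  return 0;
}
#endif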
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this value
	 should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }

  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
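  /* For example, (subreg:QI (truncate:HI (reg:SI x)) 0) on a little-endian
     target satisfies the test above and becomes (truncate:QI (reg:SI x)):
     the outer lowpart SUBREG and the explicit TRUNCATE collapse into one
     truncation to the narrower mode.  */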
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
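  /* For example, for (concat:SC (reg:SF re) (reg:SF im)) with 4-byte SFmode
     parts, byte offsets 0-3 fall in the real part and 4-7 in the imaginary
     part, so (subreg:SF (concat:SC re im) 4) simplifies to im.  */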
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
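  /* Concretely, for the lowpart cases above:
       (subreg:QI (zero_extend:SI (reg:QI x)) 0) -> (reg:QI x)
       (subreg:QI (zero_extend:SI (reg:HI x)) 0) -> (subreg:QI (reg:HI x) 0)
       (subreg:HI (zero_extend:SI (reg:QI x)) 0) -> (zero_extend:HI (reg:QI x))
     (byte offsets shown for a little-endian target).  */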
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
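  /* Example of the first transformation above: with QImode x and C = 2,
       (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
     becomes (ashiftrt:QI (reg:QI x) (const_int 2)); the high SImode bits
     discarded by the lowpart subreg are exactly the sign copies, so an
     arithmetic shift in QImode computes the same low bits.  */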
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
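/* Illustrative sketch (not part of GCC): how a caller inside the compiler
   might use simplify_gen_subreg to take the low SImode half of a DImode
   constant.  Assumes the usual GCC internal environment; the particular
   constant is only an example.  */
#if 0
static rtx
lowpart_of_di_example (void)
{
  rtx di = immed_double_const (0x12345678, 0x0, DImode);
  unsigned int off = subreg_lowpart_offset (SImode, DImode);

  /* Folds via simplify_immed_subreg to (const_int 0x12345678).  */
  return simplify_gen_subreg (SImode, di, DImode, off);
}
#endif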
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass-dependent state to be provided to these
	   routines and add simplifications based on the pass-dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))