/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
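
/* Worked example (illustrative): on a host whose HOST_WIDE_INT is 32 bits,
   the (low, high) pair for the 64-bit value -0x70000000 is
   low = 0x90000000 and high = HWI_SIGN_EXTEND (0x90000000), which
   evaluates to (HOST_WIDE_INT) -1 because 0x90000000 has its sign bit
   set; for low = 0x10000000 the macro yields 0.  */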

static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
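
/* For instance (illustrative): negating (const_int -128) in QImode cannot
   be represented as +128 in that mode; the negated value 128 is truncated
   by gen_int_mode, so the result wraps back to (const_int -128).  */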

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
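
/* Usage sketch (illustrative only; the helper name below is hypothetical):
   the sign-bit constant of a 32-bit SImode is 0x80000000, stored as
   (const_int -2147483648), for which mode_signbit_p returns true.  */
#if 0
static bool
example_is_sign_bit_mask (rtx x)
{
  /* True exactly when X is the most-significant-bit constant of SImode.  */
  return mode_signbit_p (SImode, x);
}
#endif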

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
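
/* Illustrative use (hypothetical caller): for integer modes the call below
   simply returns X, because simplify_binary_operation folds
   (plus X (const_int 0)); otherwise a fresh PLUS rtx is generated with the
   constant placed second.  */
#if 0
static rtx
example_fold_plus_zero (rtx x)
{
  return simplify_gen_binary (PLUS, SImode, x, const0_rtx);
}
#endif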

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    return copy_rtx (new_rtx);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
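
/* Illustrative use (hypothetical caller): constant operands are folded
   outright, e.g. NEG of (const_int 5) in SImode yields (const_int -5) via
   simplify_const_unary_operation; non-constant operands fall through to the
   pattern-based rules below, e.g. (neg (neg X)) simplifies back to X.  */
#if 0
static rtx
example_negate (rtx op)
{
  return simplify_unary_operation (NEG, SImode, op, SImode);
}
#endif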

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         expected.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
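
/* For example (illustrative only), with CODE == PLUS the rules above turn
   (plus (plus X (const_int 1)) Y) into (plus (plus X Y) (const_int 1)),
   and (plus (plus X (const_int 1)) (const_int 2)) folds to
   (plus X (const_int 3)) because the inner simplify_binary_operation call
   succeeds on the two constants.  */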

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
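
/* Illustrative use (hypothetical caller): two CONST_INT operands are folded
   by simplify_const_binary_operation, so the call below returns
   (const_int 5); a call such as (MINUS, SImode, x, x) instead reaches
   simplify_binary_operation_1 and, for integer modes and side-effect-free
   operands, yields (const_int 0).  */
#if 0
static rtx
example_fold_add (void)
{
  return simplify_binary_operation (PLUS, SImode, GEN_INT (2), GEN_INT (3));
}
#endif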
1552 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1553 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1554 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1555 actual constants. */
1558 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
1559 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1561 rtx tem
, reversed
, opleft
, opright
;
1563 unsigned int width
= GET_MODE_BITSIZE (mode
);
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0
) == NEG
)
1581 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1582 else if (GET_CODE (op1
) == NEG
)
1583 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode
)
1587 && GET_CODE (op0
) == NOT
1588 && trueop1
== const1_rtx
)
1589 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1597 if ((GET_CODE (op0
) == CONST
1598 || GET_CODE (op0
) == SYMBOL_REF
1599 || GET_CODE (op0
) == LABEL_REF
)
1600 && GET_CODE (op1
) == CONST_INT
)
1601 return plus_constant (op0
, INTVAL (op1
));
1602 else if ((GET_CODE (op1
) == CONST
1603 || GET_CODE (op1
) == SYMBOL_REF
1604 || GET_CODE (op1
) == LABEL_REF
)
1605 && GET_CODE (op0
) == CONST_INT
)
1606 return plus_constant (op1
, INTVAL (op0
));
1608 /* See if this is something like X * C - X or vice versa or
1609 if the multiplication is written as a shift. If so, we can
1610 distribute and make a new multiply, shift, or maybe just
1611 have X (if C is 2 in the example above). But don't make
1612 something more expensive than we had before. */
1614 if (SCALAR_INT_MODE_P (mode
))
1616 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1617 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1618 rtx lhs
= op0
, rhs
= op1
;
1620 if (GET_CODE (lhs
) == NEG
)
1624 lhs
= XEXP (lhs
, 0);
1626 else if (GET_CODE (lhs
) == MULT
1627 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1629 coeff0l
= INTVAL (XEXP (lhs
, 1));
1630 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1631 lhs
= XEXP (lhs
, 0);
1633 else if (GET_CODE (lhs
) == ASHIFT
1634 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1635 && INTVAL (XEXP (lhs
, 1)) >= 0
1636 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1638 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1640 lhs
= XEXP (lhs
, 0);
1643 if (GET_CODE (rhs
) == NEG
)
1647 rhs
= XEXP (rhs
, 0);
1649 else if (GET_CODE (rhs
) == MULT
1650 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1652 coeff1l
= INTVAL (XEXP (rhs
, 1));
1653 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1654 rhs
= XEXP (rhs
, 0);
1656 else if (GET_CODE (rhs
) == ASHIFT
1657 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1658 && INTVAL (XEXP (rhs
, 1)) >= 0
1659 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1661 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1663 rhs
= XEXP (rhs
, 0);
1666 if (rtx_equal_p (lhs
, rhs
))
1668 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1670 unsigned HOST_WIDE_INT l
;
1672 bool speed
= optimize_function_for_speed_p (cfun
);
1674 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1675 coeff
= immed_double_const (l
, h
, mode
);
1677 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1678 return rtx_cost (tem
, SET
, speed
) <= rtx_cost (orig
, SET
, speed
)
1683 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1684 if ((GET_CODE (op1
) == CONST_INT
1685 || GET_CODE (op1
) == CONST_DOUBLE
)
1686 && GET_CODE (op0
) == XOR
1687 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1688 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1689 && mode_signbit_p (mode
, op1
))
1690 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1691 simplify_gen_binary (XOR
, mode
, op1
,
1694 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1695 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1696 && GET_CODE (op0
) == MULT
1697 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1701 in1
= XEXP (XEXP (op0
, 0), 0);
1702 in2
= XEXP (op0
, 1);
1703 return simplify_gen_binary (MINUS
, mode
, op1
,
1704 simplify_gen_binary (MULT
, mode
,
1708 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1709 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1711 if (COMPARISON_P (op0
)
1712 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1713 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1714 && (reversed
= reversed_comparison (op0
, mode
)))
1716 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1718 /* If one of the operands is a PLUS or a MINUS, see if we can
1719 simplify this by the associative law.
1720 Don't use the associative law for floating point.
1721 The inaccuracy makes it nonassociative,
1722 and subtle programs can break if operations are associated. */
1724 if (INTEGRAL_MODE_P (mode
)
1725 && (plus_minus_operand_p (op0
)
1726 || plus_minus_operand_p (op1
))
1727 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1730 /* Reassociate floating point addition only when the user
1731 specifies associative math operations. */
1732 if (FLOAT_MODE_P (mode
)
1733 && flag_associative_math
)
1735 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1743 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1744 using cc0, in which case we want to leave it as a COMPARE
1745 so we can distinguish it from a register-register-copy.
1747 In IEEE floating point, x-0 is not the same as x. */
1748 if (!(HONOR_SIGNED_ZEROS (mode
)
1749 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1750 && trueop1
== CONST0_RTX (mode
))
1754 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1755 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1756 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1757 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1759 rtx xop00
= XEXP (op0
, 0);
1760 rtx xop10
= XEXP (op1
, 0);
1763 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1765 if (REG_P (xop00
) && REG_P (xop10
)
1766 && GET_MODE (xop00
) == GET_MODE (xop10
)
1767 && REGNO (xop00
) == REGNO (xop10
)
1768 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1769 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1776 /* We can't assume x-x is 0 even with non-IEEE floating point,
1777 but since it is zero except in very strange circumstances, we
1778 will treat it as zero with -ffinite-math-only. */
1779 if (rtx_equal_p (trueop0
, trueop1
)
1780 && ! side_effects_p (op0
)
1781 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
1782 return CONST0_RTX (mode
);
1784 /* Change subtraction from zero into negation. (0 - x) is the
1785 same as -x when x is NaN, infinite, or finite and nonzero.
1786 But if the mode has signed zeros, and does not round towards
1787 -infinity, then 0 - 0 is 0, not -0. */
1788 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1789 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1791 /* (-1 - a) is ~a. */
1792 if (trueop0
== constm1_rtx
)
1793 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1795 /* Subtracting 0 has no effect unless the mode has signed zeros
1796 and supports rounding towards -infinity. In such a case,
1798 if (!(HONOR_SIGNED_ZEROS (mode
)
1799 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1800 && trueop1
== CONST0_RTX (mode
))
1803 /* See if this is something like X * C - X or vice versa or
1804 if the multiplication is written as a shift. If so, we can
1805 distribute and make a new multiply, shift, or maybe just
1806 have X (if C is 2 in the example above). But don't make
1807 something more expensive than we had before. */
1809 if (SCALAR_INT_MODE_P (mode
))
1811 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1812 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1813 rtx lhs
= op0
, rhs
= op1
;
1815 if (GET_CODE (lhs
) == NEG
)
1819 lhs
= XEXP (lhs
, 0);
1821 else if (GET_CODE (lhs
) == MULT
1822 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1824 coeff0l
= INTVAL (XEXP (lhs
, 1));
1825 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1826 lhs
= XEXP (lhs
, 0);
1828 else if (GET_CODE (lhs
) == ASHIFT
1829 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1830 && INTVAL (XEXP (lhs
, 1)) >= 0
1831 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1833 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1835 lhs
= XEXP (lhs
, 0);
1838 if (GET_CODE (rhs
) == NEG
)
1842 rhs
= XEXP (rhs
, 0);
1844 else if (GET_CODE (rhs
) == MULT
1845 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1847 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1848 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1849 rhs
= XEXP (rhs
, 0);
1851 else if (GET_CODE (rhs
) == ASHIFT
1852 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1853 && INTVAL (XEXP (rhs
, 1)) >= 0
1854 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1856 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1858 rhs
= XEXP (rhs
, 0);
1861 if (rtx_equal_p (lhs
, rhs
))
1863 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1865 unsigned HOST_WIDE_INT l
;
1867 bool speed
= optimize_function_for_speed_p (cfun
);
1869 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1870 coeff
= immed_double_const (l
, h
, mode
);
1872 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1873 return rtx_cost (tem
, SET
, speed
) <= rtx_cost (orig
, SET
, speed
)
1878 /* (a - (-b)) -> (a + b). True even for IEEE. */
1879 if (GET_CODE (op1
) == NEG
)
1880 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1882 /* (-x - c) may be simplified as (-c - x). */
1883 if (GET_CODE (op0
) == NEG
1884 && (GET_CODE (op1
) == CONST_INT
1885 || GET_CODE (op1
) == CONST_DOUBLE
))
1887 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1889 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1892 /* Don't let a relocatable value get a negative coeff. */
1893 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1894 return simplify_gen_binary (PLUS
, mode
,
1896 neg_const_int (mode
, op1
));
1898 /* (x - (x & y)) -> (x & ~y) */
1899 if (GET_CODE (op1
) == AND
)
1901 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1903 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1904 GET_MODE (XEXP (op1
, 1)));
1905 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1907 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1909 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1910 GET_MODE (XEXP (op1
, 0)));
1911 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1915 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1916 by reversing the comparison code if valid. */
1917 if (STORE_FLAG_VALUE
== 1
1918 && trueop0
== const1_rtx
1919 && COMPARISON_P (op1
)
1920 && (reversed
= reversed_comparison (op1
, mode
)))
1923 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1924 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1925 && GET_CODE (op1
) == MULT
1926 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1930 in1
= XEXP (XEXP (op1
, 0), 0);
1931 in2
= XEXP (op1
, 1);
1932 return simplify_gen_binary (PLUS
, mode
,
1933 simplify_gen_binary (MULT
, mode
,
1938 /* Canonicalize (minus (neg A) (mult B C)) to
1939 (minus (mult (neg B) C) A). */
1940 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1941 && GET_CODE (op1
) == MULT
1942 && GET_CODE (op0
) == NEG
)
1946 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1947 in2
= XEXP (op1
, 1);
1948 return simplify_gen_binary (MINUS
, mode
,
1949 simplify_gen_binary (MULT
, mode
,
1954 /* If one of the operands is a PLUS or a MINUS, see if we can
1955 simplify this by the associative law. This will, for example,
1956 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1957 Don't use the associative law for floating point.
1958 The inaccuracy makes it nonassociative,
1959 and subtle programs can break if operations are associated. */
1961 if (INTEGRAL_MODE_P (mode
)
1962 && (plus_minus_operand_p (op0
)
1963 || plus_minus_operand_p (op1
))
1964 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1969 if (trueop1
== constm1_rtx
)
1970 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1972 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1973 x is NaN, since x * 0 is then also NaN. Nor is it valid
1974 when the mode has signed zeros, since multiplying a negative
1975 number by 0 will give -0, not 0. */
1976 if (!HONOR_NANS (mode
)
1977 && !HONOR_SIGNED_ZEROS (mode
)
1978 && trueop1
== CONST0_RTX (mode
)
1979 && ! side_effects_p (op0
))
1982 /* In IEEE floating point, x*1 is not equivalent to x for
1984 if (!HONOR_SNANS (mode
)
1985 && trueop1
== CONST1_RTX (mode
))
1988 /* Convert multiply by constant power of two into shift unless
1989 we are still generating RTL. This test is a kludge. */
1990 if (GET_CODE (trueop1
) == CONST_INT
1991 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1992 /* If the mode is larger than the host word size, and the
1993 uppermost bit is set, then this isn't a power of two due
1994 to implicit sign extension. */
1995 && (width
<= HOST_BITS_PER_WIDE_INT
1996 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1997 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1999 /* Likewise for multipliers wider than a word. */
2000 if (GET_CODE (trueop1
) == CONST_DOUBLE
2001 && (GET_MODE (trueop1
) == VOIDmode
2002 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
2003 && GET_MODE (op0
) == mode
2004 && CONST_DOUBLE_LOW (trueop1
) == 0
2005 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
2006 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2007 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2009 /* x*2 is x+x and x*(-1) is -x */
2010 if (GET_CODE (trueop1
) == CONST_DOUBLE
2011 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2012 && GET_MODE (op0
) == mode
)
2015 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2017 if (REAL_VALUES_EQUAL (d
, dconst2
))
2018 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2020 if (!HONOR_SNANS (mode
)
2021 && REAL_VALUES_EQUAL (d
, dconstm1
))
2022 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2025 /* Optimize -x * -x as x * x. */
2026 if (FLOAT_MODE_P (mode
)
2027 && GET_CODE (op0
) == NEG
2028 && GET_CODE (op1
) == NEG
2029 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2030 && !side_effects_p (XEXP (op0
, 0)))
2031 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2033 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2034 if (SCALAR_FLOAT_MODE_P (mode
)
2035 && GET_CODE (op0
) == ABS
2036 && GET_CODE (op1
) == ABS
2037 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2038 && !side_effects_p (XEXP (op0
, 0)))
2039 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2041 /* Reassociate multiplication, but for floating point MULTs
2042 only when the user specifies unsafe math optimizations. */
2043 if (! FLOAT_MODE_P (mode
)
2044 || flag_unsafe_math_optimizations
)
2046 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2053 if (trueop1
== const0_rtx
)
2055 if (GET_CODE (trueop1
) == CONST_INT
2056 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2057 == GET_MODE_MASK (mode
)))
2059 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2061 /* A | (~A) -> -1 */
2062 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2063 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2064 && ! side_effects_p (op0
)
2065 && SCALAR_INT_MODE_P (mode
))
2068 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2069 if (GET_CODE (op1
) == CONST_INT
2070 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2071 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
2074 /* Canonicalize (X & C1) | C2. */
2075 if (GET_CODE (op0
) == AND
2076 && GET_CODE (trueop1
) == CONST_INT
2077 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
)
2079 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2080 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2081 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2083 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2085 && !side_effects_p (XEXP (op0
, 0)))
2088 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2089 if (((c1
|c2
) & mask
) == mask
)
2090 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2092 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2093 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2095 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2096 gen_int_mode (c1
& ~c2
, mode
));
2097 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */
      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && GET_CODE (XEXP (opleft, 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */
      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
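      /* For example, in SImode (32 bits),
	 (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
	 becomes (rotate x (const_int 24)), since 24 + 8 == 32.
	 (Illustrative example.)  */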
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (INTVAL (XEXP (op0, 1))
						    & ~INTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	 return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
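      /* For example, in QImode the sign bit constant is (const_int -128),
	 so (xor:QI x (const_int -128)) is canonicalized to
	 (plus:QI x (const_int -128)); flipping the top bit is the same
	 operation either way.  (Illustrative example.)  */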
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
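      /* For example, (xor (and x (const_int 15)) (and y (const_int 240)))
	 has disjoint nonzero bits in the two operands, so it can be
	 rewritten as the corresponding IOR.  (Illustrative example.)  */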
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT val1 = INTVAL (trueop1);
	  /* If we are turning off bits already known off in OP0, we need
	     not do an AND.  */
	  if ((nzop0 & ~val1) == 0)
	    return op0;
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((val1 & nzop0) == 0 && !side_effects_p (op0))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & INTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}
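      /* For example, (and:SI (sign_extend:SI x:QI) (const_int 0x7f)) becomes
	 (zero_extend:SI (and:QI x (const_int 0x7f))), since 0x7f has no bits
	 outside QImode's mask.  (Illustrative example.)  */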
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (trueop1) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT)
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ~INTVAL (trueop1)
	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (GET_CODE (pmop[1]) == CONST_INT
	      && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
		      == INTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode))
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && exact_log2 (INTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
      break;
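      /* For example, (umod x (const_int 8)) becomes (and x (const_int 7)).
	 (Illustrative example.)  */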
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && GET_CODE (op1) == CONST_INT)
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && GET_CODE (trueop1) == CONST_INT
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_BITSIZE (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
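      /* For example, on a target where CLZ is defined to yield 32 at zero in
	 SImode and STORE_FLAG_VALUE is 1, the CLZ transformation above turns
	 (lshiftrt (clz:SI x) (const_int 5)) into (eq:SI x (const_int 0)).
	 (Illustrative example; depends on the target's CLZ definition.)  */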
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && GET_CODE (trueop1) == CONST_INT
	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && GET_CODE (trueop1) == CONST_INT
	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));
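	  /* For example,
	     (vec_select:SI (const_vector:V4SI [(const_int 10) (const_int 20)
						(const_int 30) (const_int 40)])
			    (parallel [(const_int 2)]))
	     folds to (const_int 30).  (Illustrative example.)  */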
	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offseted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  enum machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (GET_CODE (x) == CONST_INT);
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      if (XVECLEN (trueop1, 0) == 1
	  && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      return 0;
    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || GET_CODE (trueop0) == CONST_INT
	     || GET_CODE (trueop0) == CONST_DOUBLE)
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| GET_CODE (trueop1) == CONST_INT
		|| GET_CODE (trueop1) == CONST_DOUBLE))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
	  || GET_CODE (op0) == CONST_DOUBLE
	  || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
	  || GET_CODE (op1) == CONST_DOUBLE
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may dependent upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */
	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}

      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;

	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
	  break;
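	  /* For example, with SHIFT_COUNT_TRUNCATED and a 32-bit mode, a
	     constant shift count of 33 is reduced to 33 % 32 == 1 before
	     folding; without SHIFT_COUNT_TRUNCATED the out-of-range count
	     makes the folding give up instead.  (Illustrative example.)  */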
	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
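/* For instance, (plus (plus x (const_int 3)) (minus y x)) expands to the
   operand list {x, +3, y, -x}; the x and -x terms cancel and the result is
   rebuilt as (plus y (const_int 3)).  (Illustrative example.)  */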
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
		    && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }
    }
  while (changed);
  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      {
	ops[i] = ops[j];
	i++;
      }
  n_ops = i;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
3708 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3711 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3720 /* Now make the result by performing the requested operations. */
3722 for (i
= 1; i
< n_ops
; i
++)
3723 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3724 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
3741 /* Like simplify_binary_operation except used for relational operators.
3742 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3743 not also be VOIDmode.
3745 CMP_MODE specifies in which mode the comparison is done in, so it is
3746 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3747 the operands or, if both are VOIDmode, the operands are compared in
3748 "infinite precision". */
3750 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3751 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3753 rtx tem
, trueop0
, trueop1
;
3755 if (cmp_mode
== VOIDmode
)
3756 cmp_mode
= GET_MODE (op0
);
3757 if (cmp_mode
== VOIDmode
)
3758 cmp_mode
= GET_MODE (op1
);
3760 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3763 if (SCALAR_FLOAT_MODE_P (mode
))
3765 if (tem
== const0_rtx
)
3766 return CONST0_RTX (mode
);
3767 #ifdef FLOAT_STORE_FLAG_VALUE
3769 REAL_VALUE_TYPE val
;
3770 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3771 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3777 if (VECTOR_MODE_P (mode
))
3779 if (tem
== const0_rtx
)
3780 return CONST0_RTX (mode
);
3781 #ifdef VECTOR_STORE_FLAG_VALUE
3786 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3787 if (val
== NULL_RTX
)
3789 if (val
== const1_rtx
)
3790 return CONST1_RTX (mode
);
3792 units
= GET_MODE_NUNITS (mode
);
3793 v
= rtvec_alloc (units
);
3794 for (i
= 0; i
< units
; i
++)
3795 RTVEC_ELT (v
, i
) = val
;
3796 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3806 /* For the following tests, ensure const0_rtx is op1. */
3807 if (swap_commutative_operands_p (op0
, op1
)
3808 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3809 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3811 /* If op0 is a compare, extract the comparison arguments from it. */
3812 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3813 return simplify_relational_operation (code
, mode
, VOIDmode
,
3814 XEXP (op0
, 0), XEXP (op0
, 1));
3816 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3820 trueop0
= avoid_constant_pool_reference (op0
);
3821 trueop1
= avoid_constant_pool_reference (op1
);
3822 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3826 /* This part of simplify_relational_operation is only used when CMP_MODE
3827 is not in class MODE_CC (i.e. it is a real comparison).
3829 MODE is the mode of the result, while CMP_MODE specifies in which
3830 mode the comparison is done in, so it is the mode of the operands. */
3833 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3834 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3836 enum rtx_code op0code
= GET_CODE (op0
);
3838 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
3840 /* If op0 is a comparison, extract the comparison arguments
3844 if (GET_MODE (op0
) == mode
)
3845 return simplify_rtx (op0
);
3847 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3848 XEXP (op0
, 0), XEXP (op0
, 1));
3850 else if (code
== EQ
)
3852 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3853 if (new_code
!= UNKNOWN
)
3854 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3855 XEXP (op0
, 0), XEXP (op0
, 1));
3859 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
3860 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
3861 if ((code
== LTU
|| code
== GEU
)
3862 && GET_CODE (op0
) == PLUS
3863 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
3864 && (rtx_equal_p (op1
, XEXP (op0
, 0))
3865 || rtx_equal_p (op1
, XEXP (op0
, 1))))
3868 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
3869 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
3870 cmp_mode
, XEXP (op0
, 0), new_cmp
);
3873 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3874 if ((code
== LTU
|| code
== GEU
)
3875 && GET_CODE (op0
) == PLUS
3876 && rtx_equal_p (op1
, XEXP (op0
, 1))
3877 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3878 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
3879 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
, XEXP (op0
, 0));
3881 if (op1
== const0_rtx
)
3883 /* Canonicalize (GTU x 0) as (NE x 0). */
3885 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
3886 /* Canonicalize (LEU x 0) as (EQ x 0). */
3888 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
3890 else if (op1
== const1_rtx
)
3895 /* Canonicalize (GE x 1) as (GT x 0). */
3896 return simplify_gen_relational (GT
, mode
, cmp_mode
,
3899 /* Canonicalize (GEU x 1) as (NE x 0). */
3900 return simplify_gen_relational (NE
, mode
, cmp_mode
,
3903 /* Canonicalize (LT x 1) as (LE x 0). */
3904 return simplify_gen_relational (LE
, mode
, cmp_mode
,
3907 /* Canonicalize (LTU x 1) as (EQ x 0). */
3908 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
3914 else if (op1
== constm1_rtx
)
3916 /* Canonicalize (LE x -1) as (LT x 0). */
3918 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
3919 /* Canonicalize (GT x -1) as (GE x 0). */
3921 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
3924 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3925 if ((code
== EQ
|| code
== NE
)
3926 && (op0code
== PLUS
|| op0code
== MINUS
)
3928 && CONSTANT_P (XEXP (op0
, 1))
3929 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3931 rtx x
= XEXP (op0
, 0);
3932 rtx c
= XEXP (op0
, 1);
3934 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3936 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3939 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3940 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3942 && op1
== const0_rtx
3943 && GET_MODE_CLASS (mode
) == MODE_INT
3944 && cmp_mode
!= VOIDmode
3945 /* ??? Work-around BImode bugs in the ia64 backend. */
3947 && cmp_mode
!= BImode
3948 && nonzero_bits (op0
, cmp_mode
) == 1
3949 && STORE_FLAG_VALUE
== 1)
3950 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3951 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3952 : lowpart_subreg (mode
, op0
, cmp_mode
);
3954 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3955 if ((code
== EQ
|| code
== NE
)
3956 && op1
== const0_rtx
3958 return simplify_gen_relational (code
, mode
, cmp_mode
,
3959 XEXP (op0
, 0), XEXP (op0
, 1));
3961 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3962 if ((code
== EQ
|| code
== NE
)
3964 && rtx_equal_p (XEXP (op0
, 0), op1
)
3965 && !side_effects_p (XEXP (op0
, 0)))
3966 return simplify_gen_relational (code
, mode
, cmp_mode
,
3967 XEXP (op0
, 1), const0_rtx
);
3969 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3970 if ((code
== EQ
|| code
== NE
)
3972 && rtx_equal_p (XEXP (op0
, 1), op1
)
3973 && !side_effects_p (XEXP (op0
, 1)))
3974 return simplify_gen_relational (code
, mode
, cmp_mode
,
3975 XEXP (op0
, 0), const0_rtx
);
3977 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3978 if ((code
== EQ
|| code
== NE
)
3980 && (GET_CODE (op1
) == CONST_INT
3981 || GET_CODE (op1
) == CONST_DOUBLE
)
3982 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3983 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3984 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3985 simplify_gen_binary (XOR
, cmp_mode
,
3986 XEXP (op0
, 1), op1
));
3988 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
3994 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3995 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
3996 XEXP (op0
, 0), const0_rtx
);
4001 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4002 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4003 XEXP (op0
, 0), const0_rtx
);
4022 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4023 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4024 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4025 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4026 For floating-point comparisons, assume that the operands were ordered. */
4029 comparison_result (enum rtx_code code
, int known_results
)
4035 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4038 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4042 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4045 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4049 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4052 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4055 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4057 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4060 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4062 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4065 return const_true_rtx
;
4073 /* Check if the given comparison (done in the given MODE) is actually a
4074 tautology or a contradiction.
4075 If no simplification is possible, this function returns zero.
4076 Otherwise, it returns either const_true_rtx or const0_rtx. */
4079 simplify_const_relational_operation (enum rtx_code code
,
4080 enum machine_mode mode
,
4087 gcc_assert (mode
!= VOIDmode
4088 || (GET_MODE (op0
) == VOIDmode
4089 && GET_MODE (op1
) == VOIDmode
));
4091 /* If op0 is a compare, extract the comparison arguments from it. */
4092 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4094 op1
= XEXP (op0
, 1);
4095 op0
= XEXP (op0
, 0);
4097 if (GET_MODE (op0
) != VOIDmode
)
4098 mode
= GET_MODE (op0
);
4099 else if (GET_MODE (op1
) != VOIDmode
)
4100 mode
= GET_MODE (op1
);
4105 /* We can't simplify MODE_CC values since we don't know what the
4106 actual comparison is. */
4107 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4110 /* Make sure the constant is second. */
4111 if (swap_commutative_operands_p (op0
, op1
))
4113 tem
= op0
, op0
= op1
, op1
= tem
;
4114 code
= swap_condition (code
);
4117 trueop0
= avoid_constant_pool_reference (op0
);
4118 trueop1
= avoid_constant_pool_reference (op1
);
4120 /* For integer comparisons of A and B maybe we can simplify A - B and can
4121 then simplify a comparison of that with zero. If A and B are both either
4122 a register or a CONST_INT, this can't help; testing for these cases will
4123 prevent infinite recursion here and speed things up.
4125 We can only do this for EQ and NE comparisons as otherwise we may
4126 lose or introduce overflow which we cannot disregard as undefined as
4127 we do not know the signedness of the operation on either the left or
4128 the right hand side of the comparison. */
4130 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4131 && (code
== EQ
|| code
== NE
)
4132 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
4133 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
4134 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4135 /* We cannot do this if tem is a nonzero address. */
4136 && ! nonzero_address_p (tem
))
4137 return simplify_const_relational_operation (signed_condition (code
),
4138 mode
, tem
, const0_rtx
);
4140 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4141 return const_true_rtx
;
4143 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4146 /* For modes without NaNs, if the two operands are equal, we know the
4147 result except if they have side-effects. Even with NaNs we know
4148 the result of unordered comparisons and, if signaling NaNs are
4149 irrelevant, also the result of LT/GT/LTGT. */
4150 if ((! HONOR_NANS (GET_MODE (trueop0
))
4151 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4152 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4153 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4154 && rtx_equal_p (trueop0
, trueop1
)
4155 && ! side_effects_p (trueop0
))
4156 return comparison_result (code
, CMP_EQ
);
4158 /* If the operands are floating-point constants, see if we can fold
4160 if (GET_CODE (trueop0
) == CONST_DOUBLE
4161 && GET_CODE (trueop1
) == CONST_DOUBLE
4162 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4164 REAL_VALUE_TYPE d0
, d1
;
4166 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4167 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4169 /* Comparisons are unordered iff at least one of the values is NaN. */
4170 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4180 return const_true_rtx
;
4193 return comparison_result (code
,
4194 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4195 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4198 /* Otherwise, see if the operands are both integers. */
4199 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4200 && (GET_CODE (trueop0
) == CONST_DOUBLE
4201 || GET_CODE (trueop0
) == CONST_INT
)
4202 && (GET_CODE (trueop1
) == CONST_DOUBLE
4203 || GET_CODE (trueop1
) == CONST_INT
))
4205 int width
= GET_MODE_BITSIZE (mode
);
4206 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4207 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4209 /* Get the two words comprising each integer constant. */
4210 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
4212 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4213 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4217 l0u
= l0s
= INTVAL (trueop0
);
4218 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4221 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4223 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4224 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4228 l1u
= l1s
= INTVAL (trueop1
);
4229 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4232 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4233 we have to sign or zero-extend the values. */
4234 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4236 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4237 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4239 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4240 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4242 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4243 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4245 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4246 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4248 if (h0u
== h1u
&& l0u
== l1u
)
4249 return comparison_result (code
, CMP_EQ
);
4253 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4254 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4255 return comparison_result (code
, cr
);
4259 /* Optimize comparisons with upper and lower bounds. */
4260 if (SCALAR_INT_MODE_P (mode
)
4261 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
4262 && GET_CODE (trueop1
) == CONST_INT
)
4265 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4266 HOST_WIDE_INT val
= INTVAL (trueop1
);
4267 HOST_WIDE_INT mmin
, mmax
;
4277 /* Get a reduced range if the sign bit is zero. */
4278 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4285 rtx mmin_rtx
, mmax_rtx
;
4286 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4288 mmin
= INTVAL (mmin_rtx
);
4289 mmax
= INTVAL (mmax_rtx
);
4292 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4294 mmin
>>= (sign_copies
- 1);
4295 mmax
>>= (sign_copies
- 1);
4301 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4303 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4304 return const_true_rtx
;
4305 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4310 return const_true_rtx
;
4315 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4317 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4318 return const_true_rtx
;
4319 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4324 return const_true_rtx
;
4330 /* x == y is always false for y out of range. */
4331 if (val
< mmin
|| val
> mmax
)
4335 /* x > y is always false for y >= mmax, always true for y < mmin. */
4337 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4339 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4340 return const_true_rtx
;
4346 return const_true_rtx
;
4349 /* x < y is always false for y <= mmin, always true for y > mmax. */
4351 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4353 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4354 return const_true_rtx
;
4360 return const_true_rtx
;
4364 /* x != y is always true for y out of range. */
4365 if (val
< mmin
|| val
> mmax
)
4366 return const_true_rtx
;
4374 /* Optimize integer comparisons with zero. */
4375 if (trueop1
== const0_rtx
)
4377 /* Some addresses are known to be nonzero. We don't know
4378 their sign, but equality comparisons are known. */
4379 if (nonzero_address_p (trueop0
))
4381 if (code
== EQ
|| code
== LEU
)
4383 if (code
== NE
|| code
== GTU
)
4384 return const_true_rtx
;
4387 /* See if the first operand is an IOR with a constant. If so, we
4388 may be able to determine the result of this comparison. */
4389 if (GET_CODE (op0
) == IOR
)
4391 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4392 if (GET_CODE (inner_const
) == CONST_INT
&& inner_const
!= const0_rtx
)
4394 int sign_bitnum
= GET_MODE_BITSIZE (mode
) - 1;
4395 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4396 && (INTVAL (inner_const
)
4397 & ((HOST_WIDE_INT
) 1 << sign_bitnum
)));
4406 return const_true_rtx
;
4410 return const_true_rtx
;
4424 /* Optimize comparison of ABS with zero. */
4425 if (trueop1
== CONST0_RTX (mode
)
4426 && (GET_CODE (trueop0
) == ABS
4427 || (GET_CODE (trueop0
) == FLOAT_EXTEND
4428 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
4433 /* Optimize abs(x) < 0.0. */
4434 if (!HONOR_SNANS (mode
)
4435 && (!INTEGRAL_MODE_P (mode
)
4436 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4438 if (INTEGRAL_MODE_P (mode
)
4439 && (issue_strict_overflow_warning
4440 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4441 warning (OPT_Wstrict_overflow
,
4442 ("assuming signed overflow does not occur when "
4443 "assuming abs (x) < 0 is false"));
4449 /* Optimize abs(x) >= 0.0. */
4450 if (!HONOR_NANS (mode
)
4451 && (!INTEGRAL_MODE_P (mode
)
4452 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4454 if (INTEGRAL_MODE_P (mode
)
4455 && (issue_strict_overflow_warning
4456 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4457 warning (OPT_Wstrict_overflow
,
4458 ("assuming signed overflow does not occur when "
4459 "assuming abs (x) >= 0 is true"));
4460 return const_true_rtx
;
4465 /* Optimize ! (abs(x) < 0.0). */
4466 return const_true_rtx
;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
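      /* For instance, with BITS_BIG_ENDIAN clear,
	 (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 8))
	 shifts the constant right by 8 to get 0x12, masks it to 4 bits,
	 and folds to (const_int 2).  (Illustrative only.)  */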
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
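	      /* For instance, in V4SImode with OP2 = (const_int 5) bits 0
		 and 2 are set, so the result takes elements 0 and 2 from
		 OP0 and elements 1 and 3 from OP1.  (Illustrative only.)  */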
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}

/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
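/* For example, on a little-endian target a lowpart access such as
   (subreg:HI (const_int 0x12345678) 0), with SImode as INNERMODE, unpacks
   the constant into the byte array 78 56 34 12, selects the two bytes at
   offset 0, and repacks them as (const_int 0x5678).  (Illustrative only;
   the exact result depends on target byte order and mode sizes.)  */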
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
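  /* For instance, on a big-endian target (subreg:SI (x:DI) 4) names the
     least significant word of X, so the memory-order offset 4 is
     renumbered to little-endian byte 0 here.  (Illustrative only.)  */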
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to OP's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
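      /* For example, on a big-endian target the paradoxical
	 (subreg:DI (x:SI) 0) notionally starts 4 bytes before X, i.e. at
	 offset -4, even though its SUBREG_BYTE is defined to be 0.
	 (Illustrative only.)  */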
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking at the lower
	     part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
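      /* For instance, on a little-endian target
	 (subreg:HI (zero_extend:SI (x:HI)) 0) folds back to X, while
	 (subreg:QI (zero_extend:SI (x:HI)) 0) becomes (subreg:QI (x:HI) 0),
	 dropping the extension.  (Illustrative only.)  */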
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
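  /* For instance, on a little-endian target
     (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) (const_int 3)) 0)
     becomes (ashiftrt:QI (x:QI) (const_int 3)).  (Illustrative only.)  */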
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))