/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
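/* For example, when a single HOST_WIDE_INT value L is widened to a
   (low, high) pair, the high word is HWI_SIGN_EXTEND (L): all ones if
   L is negative, zero otherwise.  */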
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
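/* For instance, negating (const_int -1) yields (const_int 1), while
   negating the most negative value representable in MODE wraps back to
   that same value, because gen_int_mode truncates the result to MODE.  */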
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
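/* For example, for a 32-bit SImode the only value accepted is the
   CONST_INT whose sole set bit is bit 31, i.e. 0x80000000 in that mode.  */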
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
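/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) hands back
   X itself, and adding two CONST_INTs hands back the folded constant rather
   than building a new PLUS.  */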
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
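/* For example, (not (const_int 0)) in SImode is folded here to
   (const_int -1) by simplify_const_unary_operation.  */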
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                  (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
1023 if (GET_CODE (op
) == CONST_INT
1024 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1026 HOST_WIDE_INT arg0
= INTVAL (op
);
1040 val
= (arg0
>= 0 ? arg0
: - arg0
);
1044 /* Don't use ffs here. Instead, get low order bit and then its
1045 number. If arg0 is zero, this will return 0, as desired. */
1046 arg0
&= GET_MODE_MASK (mode
);
1047 val
= exact_log2 (arg0
& (- arg0
)) + 1;
1051 arg0
&= GET_MODE_MASK (mode
);
1052 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1055 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
1059 arg0
&= GET_MODE_MASK (mode
);
1062 /* Even if the value at zero is undefined, we have to come
1063 up with some replacement. Seems good enough. */
1064 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1065 val
= GET_MODE_BITSIZE (mode
);
1068 val
= exact_log2 (arg0
& -arg0
);
1072 arg0
&= GET_MODE_MASK (mode
);
1075 val
++, arg0
&= arg0
- 1;
1079 arg0
&= GET_MODE_MASK (mode
);
1082 val
++, arg0
&= arg0
- 1;
1091 for (s
= 0; s
< width
; s
+= 8)
1093 unsigned int d
= width
- s
- 8;
1094 unsigned HOST_WIDE_INT byte
;
1095 byte
= (arg0
>> s
) & 0xff;
1106 /* When zero-extending a CONST_INT, we need to know its
1108 gcc_assert (op_mode
!= VOIDmode
);
1109 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1111 /* If we were really extending the mode,
1112 we would have to distinguish between zero-extension
1113 and sign-extension. */
1114 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1117 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1118 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1124 if (op_mode
== VOIDmode
)
1126 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1128 /* If we were really extending the mode,
1129 we would have to distinguish between zero-extension
1130 and sign-extension. */
1131 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1134 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1137 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1139 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
1140 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1148 case FLOAT_TRUNCATE
:
1159 return gen_int_mode (val
, mode
);
1162 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1163 for a DImode operation on a CONST_INT. */
1164 else if (GET_MODE (op
) == VOIDmode
1165 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1166 && (GET_CODE (op
) == CONST_DOUBLE
1167 || GET_CODE (op
) == CONST_INT
))
1169 unsigned HOST_WIDE_INT l1
, lv
;
1170 HOST_WIDE_INT h1
, hv
;
1172 if (GET_CODE (op
) == CONST_DOUBLE
)
1173 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1175 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1185 neg_double (l1
, h1
, &lv
, &hv
);
1190 neg_double (l1
, h1
, &lv
, &hv
);
1202 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
1205 lv
= exact_log2 (l1
& -l1
) + 1;
1211 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
1212 - HOST_BITS_PER_WIDE_INT
;
1214 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
1215 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1216 lv
= GET_MODE_BITSIZE (mode
);
1222 lv
= exact_log2 (l1
& -l1
);
1224 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
1225 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1226 lv
= GET_MODE_BITSIZE (mode
);
1254 for (s
= 0; s
< width
; s
+= 8)
1256 unsigned int d
= width
- s
- 8;
1257 unsigned HOST_WIDE_INT byte
;
1259 if (s
< HOST_BITS_PER_WIDE_INT
)
1260 byte
= (l1
>> s
) & 0xff;
1262 byte
= (h1
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1264 if (d
< HOST_BITS_PER_WIDE_INT
)
1267 hv
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1273 /* This is just a change-of-mode, so do nothing. */
1278 gcc_assert (op_mode
!= VOIDmode
);
1280 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1284 lv
= l1
& GET_MODE_MASK (op_mode
);
1288 if (op_mode
== VOIDmode
1289 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1293 lv
= l1
& GET_MODE_MASK (op_mode
);
1294 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
1295 && (lv
& ((HOST_WIDE_INT
) 1
1296 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
1297 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1299 hv
= HWI_SIGN_EXTEND (lv
);
1310 return immed_double_const (lv
, hv
, mode
);
1313 else if (GET_CODE (op
) == CONST_DOUBLE
1314 && SCALAR_FLOAT_MODE_P (mode
))
1316 REAL_VALUE_TYPE d
, t
;
1317 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1322 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1324 real_sqrt (&t
, mode
, &d
);
1328 d
= REAL_VALUE_ABS (d
);
1331 d
= REAL_VALUE_NEGATE (d
);
1333 case FLOAT_TRUNCATE
:
1334 d
= real_value_truncate (mode
, d
);
1337 /* All this does is change the mode. */
1340 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1347 real_to_target (tmp
, &d
, GET_MODE (op
));
1348 for (i
= 0; i
< 4; i
++)
1350 real_from_target (&d
, tmp
, mode
);
1356 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1359 else if (GET_CODE (op
) == CONST_DOUBLE
1360 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1361 && GET_MODE_CLASS (mode
) == MODE_INT
1362 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1364 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1365 operators are intentionally left unspecified (to ease implementation
1366 by target backends), for consistency, this routine implements the
1367 same semantics for constant folding as used by the middle-end. */
1369 /* This was formerly used only for non-IEEE float.
1370 eggert@twinsun.com says it is safe for IEEE also. */
1371 HOST_WIDE_INT xh
, xl
, th
, tl
;
1372 REAL_VALUE_TYPE x
, t
;
1373 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1377 if (REAL_VALUE_ISNAN (x
))
1380 /* Test against the signed upper bound. */
1381 if (width
> HOST_BITS_PER_WIDE_INT
)
1383 th
= ((unsigned HOST_WIDE_INT
) 1
1384 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1390 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1392 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1393 if (REAL_VALUES_LESS (t
, x
))
1400 /* Test against the signed lower bound. */
1401 if (width
> HOST_BITS_PER_WIDE_INT
)
1403 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1409 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1411 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1412 if (REAL_VALUES_LESS (x
, t
))
1418 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1422 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1425 /* Test against the unsigned upper bound. */
1426 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1431 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1433 th
= ((unsigned HOST_WIDE_INT
) 1
1434 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1440 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1442 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1443 if (REAL_VALUES_LESS (t
, x
))
1450 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1456 return immed_double_const (xl
, xh
, mode
);
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
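/* As an example of the above, (plus (plus x (const_int 1)) (const_int 2))
   is reassociated into (plus x (const_int 3)).  */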
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
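/* For example, simplify_binary_operation (PLUS, SImode, const1_rtx,
   GEN_INT (2)) is folded to (const_int 3) by
   simplify_const_binary_operation.  */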
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
1565 /* Even if we can't compute a constant result,
1566 there are some cases worth simplifying. */
1571 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1572 when x is NaN, infinite, or finite and nonzero. They aren't
1573 when x is -0 and the rounding mode is not towards -infinity,
1574 since (-0) + 0 is then 0. */
1575 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1578 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1579 transformations are safe even for IEEE. */
1580 if (GET_CODE (op0
) == NEG
)
1581 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1582 else if (GET_CODE (op1
) == NEG
)
1583 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1585 /* (~a) + 1 -> -a */
1586 if (INTEGRAL_MODE_P (mode
)
1587 && GET_CODE (op0
) == NOT
1588 && trueop1
== const1_rtx
)
1589 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1591 /* Handle both-operands-constant cases. We can only add
1592 CONST_INTs to constants since the sum of relocatable symbols
1593 can't be handled by most assemblers. Don't add CONST_INT
1594 to CONST_INT since overflow won't be computed properly if wider
1595 than HOST_BITS_PER_WIDE_INT. */
1597 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1598 && GET_CODE (op1
) == CONST_INT
)
1599 return plus_constant (op0
, INTVAL (op1
));
1600 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1601 && GET_CODE (op0
) == CONST_INT
)
1602 return plus_constant (op1
, INTVAL (op0
));
1604 /* See if this is something like X * C - X or vice versa or
1605 if the multiplication is written as a shift. If so, we can
1606 distribute and make a new multiply, shift, or maybe just
1607 have X (if C is 2 in the example above). But don't make
1608 something more expensive than we had before. */
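      /* For instance, (plus (mult x (const_int 2)) x) can become
         (mult x (const_int 3)), and (plus (ashift x (const_int 1)) x)
         likewise, as long as the result is no more expensive.  */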
1610 if (SCALAR_INT_MODE_P (mode
))
1612 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1613 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1614 rtx lhs
= op0
, rhs
= op1
;
1616 if (GET_CODE (lhs
) == NEG
)
1620 lhs
= XEXP (lhs
, 0);
1622 else if (GET_CODE (lhs
) == MULT
1623 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1625 coeff0l
= INTVAL (XEXP (lhs
, 1));
1626 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1627 lhs
= XEXP (lhs
, 0);
1629 else if (GET_CODE (lhs
) == ASHIFT
1630 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1631 && INTVAL (XEXP (lhs
, 1)) >= 0
1632 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1634 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1636 lhs
= XEXP (lhs
, 0);
1639 if (GET_CODE (rhs
) == NEG
)
1643 rhs
= XEXP (rhs
, 0);
1645 else if (GET_CODE (rhs
) == MULT
1646 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1648 coeff1l
= INTVAL (XEXP (rhs
, 1));
1649 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1650 rhs
= XEXP (rhs
, 0);
1652 else if (GET_CODE (rhs
) == ASHIFT
1653 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1654 && INTVAL (XEXP (rhs
, 1)) >= 0
1655 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1657 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1659 rhs
= XEXP (rhs
, 0);
1662 if (rtx_equal_p (lhs
, rhs
))
1664 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1666 unsigned HOST_WIDE_INT l
;
1669 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1670 coeff
= immed_double_const (l
, h
, mode
);
1672 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1673 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1678 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1679 if ((GET_CODE (op1
) == CONST_INT
1680 || GET_CODE (op1
) == CONST_DOUBLE
)
1681 && GET_CODE (op0
) == XOR
1682 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1683 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1684 && mode_signbit_p (mode
, op1
))
1685 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1686 simplify_gen_binary (XOR
, mode
, op1
,
1689 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1690 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1691 && GET_CODE (op0
) == MULT
1692 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1696 in1
= XEXP (XEXP (op0
, 0), 0);
1697 in2
= XEXP (op0
, 1);
1698 return simplify_gen_binary (MINUS
, mode
, op1
,
1699 simplify_gen_binary (MULT
, mode
,
1703 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1704 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1706 if (COMPARISON_P (op0
)
1707 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1708 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1709 && (reversed
= reversed_comparison (op0
, mode
)))
1711 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1713 /* If one of the operands is a PLUS or a MINUS, see if we can
1714 simplify this by the associative law.
1715 Don't use the associative law for floating point.
1716 The inaccuracy makes it nonassociative,
1717 and subtle programs can break if operations are associated. */
1719 if (INTEGRAL_MODE_P (mode
)
1720 && (plus_minus_operand_p (op0
)
1721 || plus_minus_operand_p (op1
))
1722 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1725 /* Reassociate floating point addition only when the user
1726 specifies associative math operations. */
1727 if (FLOAT_MODE_P (mode
)
1728 && flag_associative_math
)
1730 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1738 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1739 using cc0, in which case we want to leave it as a COMPARE
1740 so we can distinguish it from a register-register-copy.
1742 In IEEE floating point, x-0 is not the same as x. */
1743 if (!(HONOR_SIGNED_ZEROS (mode
)
1744 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1745 && trueop1
== CONST0_RTX (mode
))
1749 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1750 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1751 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1752 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1754 rtx xop00
= XEXP (op0
, 0);
1755 rtx xop10
= XEXP (op1
, 0);
1758 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1760 if (REG_P (xop00
) && REG_P (xop10
)
1761 && GET_MODE (xop00
) == GET_MODE (xop10
)
1762 && REGNO (xop00
) == REGNO (xop10
)
1763 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1764 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1771 /* We can't assume x-x is 0 even with non-IEEE floating point,
1772 but since it is zero except in very strange circumstances, we
1773 will treat it as zero with -ffinite-math-only. */
1774 if (rtx_equal_p (trueop0
, trueop1
)
1775 && ! side_effects_p (op0
)
1776 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
1777 return CONST0_RTX (mode
);
1779 /* Change subtraction from zero into negation. (0 - x) is the
1780 same as -x when x is NaN, infinite, or finite and nonzero.
1781 But if the mode has signed zeros, and does not round towards
1782 -infinity, then 0 - 0 is 0, not -0. */
1783 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1784 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1786 /* (-1 - a) is ~a. */
1787 if (trueop0
== constm1_rtx
)
1788 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1790 /* Subtracting 0 has no effect unless the mode has signed zeros
1791 and supports rounding towards -infinity. In such a case,
1793 if (!(HONOR_SIGNED_ZEROS (mode
)
1794 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1795 && trueop1
== CONST0_RTX (mode
))
1798 /* See if this is something like X * C - X or vice versa or
1799 if the multiplication is written as a shift. If so, we can
1800 distribute and make a new multiply, shift, or maybe just
1801 have X (if C is 2 in the example above). But don't make
1802 something more expensive than we had before. */
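      /* For instance, (minus (mult x (const_int 3)) x) can become
         (mult x (const_int 2)) when that is no more expensive.  */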
1804 if (SCALAR_INT_MODE_P (mode
))
1806 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1807 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1808 rtx lhs
= op0
, rhs
= op1
;
1810 if (GET_CODE (lhs
) == NEG
)
1814 lhs
= XEXP (lhs
, 0);
1816 else if (GET_CODE (lhs
) == MULT
1817 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1819 coeff0l
= INTVAL (XEXP (lhs
, 1));
1820 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1821 lhs
= XEXP (lhs
, 0);
1823 else if (GET_CODE (lhs
) == ASHIFT
1824 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1825 && INTVAL (XEXP (lhs
, 1)) >= 0
1826 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1828 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1830 lhs
= XEXP (lhs
, 0);
1833 if (GET_CODE (rhs
) == NEG
)
1837 rhs
= XEXP (rhs
, 0);
1839 else if (GET_CODE (rhs
) == MULT
1840 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1842 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1843 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1844 rhs
= XEXP (rhs
, 0);
1846 else if (GET_CODE (rhs
) == ASHIFT
1847 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1848 && INTVAL (XEXP (rhs
, 1)) >= 0
1849 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1851 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1853 rhs
= XEXP (rhs
, 0);
1856 if (rtx_equal_p (lhs
, rhs
))
1858 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1860 unsigned HOST_WIDE_INT l
;
1863 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1864 coeff
= immed_double_const (l
, h
, mode
);
1866 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1867 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1872 /* (a - (-b)) -> (a + b). True even for IEEE. */
1873 if (GET_CODE (op1
) == NEG
)
1874 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1876 /* (-x - c) may be simplified as (-c - x). */
1877 if (GET_CODE (op0
) == NEG
1878 && (GET_CODE (op1
) == CONST_INT
1879 || GET_CODE (op1
) == CONST_DOUBLE
))
1881 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1883 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1886 /* Don't let a relocatable value get a negative coeff. */
1887 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1888 return simplify_gen_binary (PLUS
, mode
,
1890 neg_const_int (mode
, op1
));
1892 /* (x - (x & y)) -> (x & ~y) */
1893 if (GET_CODE (op1
) == AND
)
1895 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1897 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1898 GET_MODE (XEXP (op1
, 1)));
1899 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1901 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1903 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1904 GET_MODE (XEXP (op1
, 0)));
1905 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1909 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1910 by reversing the comparison code if valid. */
1911 if (STORE_FLAG_VALUE
== 1
1912 && trueop0
== const1_rtx
1913 && COMPARISON_P (op1
)
1914 && (reversed
= reversed_comparison (op1
, mode
)))
1917 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1918 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1919 && GET_CODE (op1
) == MULT
1920 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1924 in1
= XEXP (XEXP (op1
, 0), 0);
1925 in2
= XEXP (op1
, 1);
1926 return simplify_gen_binary (PLUS
, mode
,
1927 simplify_gen_binary (MULT
, mode
,
1932 /* Canonicalize (minus (neg A) (mult B C)) to
1933 (minus (mult (neg B) C) A). */
1934 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1935 && GET_CODE (op1
) == MULT
1936 && GET_CODE (op0
) == NEG
)
1940 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1941 in2
= XEXP (op1
, 1);
1942 return simplify_gen_binary (MINUS
, mode
,
1943 simplify_gen_binary (MULT
, mode
,
1948 /* If one of the operands is a PLUS or a MINUS, see if we can
1949 simplify this by the associative law. This will, for example,
1950 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1951 Don't use the associative law for floating point.
1952 The inaccuracy makes it nonassociative,
1953 and subtle programs can break if operations are associated. */
1955 if (INTEGRAL_MODE_P (mode
)
1956 && (plus_minus_operand_p (op0
)
1957 || plus_minus_operand_p (op1
))
1958 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1963 if (trueop1
== constm1_rtx
)
1964 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1966 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1967 x is NaN, since x * 0 is then also NaN. Nor is it valid
1968 when the mode has signed zeros, since multiplying a negative
1969 number by 0 will give -0, not 0. */
1970 if (!HONOR_NANS (mode
)
1971 && !HONOR_SIGNED_ZEROS (mode
)
1972 && trueop1
== CONST0_RTX (mode
)
1973 && ! side_effects_p (op0
))
1976 /* In IEEE floating point, x*1 is not equivalent to x for
1978 if (!HONOR_SNANS (mode
)
1979 && trueop1
== CONST1_RTX (mode
))
1982 /* Convert multiply by constant power of two into shift unless
1983 we are still generating RTL. This test is a kludge. */
1984 if (GET_CODE (trueop1
) == CONST_INT
1985 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1986 /* If the mode is larger than the host word size, and the
1987 uppermost bit is set, then this isn't a power of two due
1988 to implicit sign extension. */
1989 && (width
<= HOST_BITS_PER_WIDE_INT
1990 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1991 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1993 /* Likewise for multipliers wider than a word. */
1994 if (GET_CODE (trueop1
) == CONST_DOUBLE
1995 && (GET_MODE (trueop1
) == VOIDmode
1996 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1997 && GET_MODE (op0
) == mode
1998 && CONST_DOUBLE_LOW (trueop1
) == 0
1999 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
2000 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2001 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2003 /* x*2 is x+x and x*(-1) is -x */
2004 if (GET_CODE (trueop1
) == CONST_DOUBLE
2005 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2006 && GET_MODE (op0
) == mode
)
2009 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2011 if (REAL_VALUES_EQUAL (d
, dconst2
))
2012 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2014 if (!HONOR_SNANS (mode
)
2015 && REAL_VALUES_EQUAL (d
, dconstm1
))
2016 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2019 /* Optimize -x * -x as x * x. */
2020 if (FLOAT_MODE_P (mode
)
2021 && GET_CODE (op0
) == NEG
2022 && GET_CODE (op1
) == NEG
2023 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2024 && !side_effects_p (XEXP (op0
, 0)))
2025 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2027 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2028 if (SCALAR_FLOAT_MODE_P (mode
)
2029 && GET_CODE (op0
) == ABS
2030 && GET_CODE (op1
) == ABS
2031 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2032 && !side_effects_p (XEXP (op0
, 0)))
2033 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2035 /* Reassociate multiplication, but for floating point MULTs
2036 only when the user specifies unsafe math optimizations. */
2037 if (! FLOAT_MODE_P (mode
)
2038 || flag_unsafe_math_optimizations
)
2040 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2047 if (trueop1
== const0_rtx
)
2049 if (GET_CODE (trueop1
) == CONST_INT
2050 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2051 == GET_MODE_MASK (mode
)))
2053 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2055 /* A | (~A) -> -1 */
2056 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2057 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2058 && ! side_effects_p (op0
)
2059 && SCALAR_INT_MODE_P (mode
))
2062 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2063 if (GET_CODE (op1
) == CONST_INT
2064 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2065 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
2068 /* Canonicalize (X & C1) | C2. */
2069 if (GET_CODE (op0
) == AND
2070 && GET_CODE (trueop1
) == CONST_INT
2071 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
)
2073 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2074 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2075 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2077 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2079 && !side_effects_p (XEXP (op0
, 0)))
2082 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2083 if (((c1
|c2
) & mask
) == mask
)
2084 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2086 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2087 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2089 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2090 gen_int_mode (c1
& ~c2
, mode
));
2091 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2095 /* Convert (A & B) | A to A. */
2096 if (GET_CODE (op0
) == AND
2097 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2098 || rtx_equal_p (XEXP (op0
, 1), op1
))
2099 && ! side_effects_p (XEXP (op0
, 0))
2100 && ! side_effects_p (XEXP (op0
, 1)))
2103 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2104 mode size to (rotate A CX). */
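      /* For instance, with a 32-bit MODE, (ior (ashift a (const_int 8))
         (lshiftrt a (const_int 24))) becomes (rotate a (const_int 8)).  */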
2106 if (GET_CODE (op1
) == ASHIFT
2107 || GET_CODE (op1
) == SUBREG
)
2118 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2119 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2120 && GET_CODE (XEXP (opleft
, 1)) == CONST_INT
2121 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2122 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2123 == GET_MODE_BITSIZE (mode
)))
2124 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2126 /* Same, but for ashift that has been "simplified" to a wider mode
2127 by simplify_shift_const. */
2129 if (GET_CODE (opleft
) == SUBREG
2130 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2131 && GET_CODE (opright
) == LSHIFTRT
2132 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2133 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2134 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2135 && (GET_MODE_SIZE (GET_MODE (opleft
))
2136 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2137 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2138 SUBREG_REG (XEXP (opright
, 0)))
2139 && GET_CODE (XEXP (SUBREG_REG (opleft
), 1)) == CONST_INT
2140 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2141 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2142 == GET_MODE_BITSIZE (mode
)))
2143 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2144 XEXP (SUBREG_REG (opleft
), 1));
2146 /* If we have (ior (and (X C1) C2)), simplify this by making
2147 C1 as small as possible if C1 actually changes. */
2148 if (GET_CODE (op1
) == CONST_INT
2149 && (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2150 || INTVAL (op1
) > 0)
2151 && GET_CODE (op0
) == AND
2152 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2153 && GET_CODE (op1
) == CONST_INT
2154 && (INTVAL (XEXP (op0
, 1)) & INTVAL (op1
)) != 0)
2155 return simplify_gen_binary (IOR
, mode
,
2157 (AND
, mode
, XEXP (op0
, 0),
2158 GEN_INT (INTVAL (XEXP (op0
, 1))
2162 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2163 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2164 the PLUS does not affect any of the bits in OP1: then we can do
2165 the IOR as a PLUS and we can associate. This is valid if OP1
2166 can be safely shifted left C bits. */
2167 if (GET_CODE (trueop1
) == CONST_INT
&& GET_CODE (op0
) == ASHIFTRT
2168 && GET_CODE (XEXP (op0
, 0)) == PLUS
2169 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == CONST_INT
2170 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2171 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2173 int count
= INTVAL (XEXP (op0
, 1));
2174 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2176 if (mask
>> count
== INTVAL (trueop1
)
2177 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2178 return simplify_gen_binary (ASHIFTRT
, mode
,
2179 plus_constant (XEXP (op0
, 0), mask
),
2183 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2189 if (trueop1
== const0_rtx
)
2191 if (GET_CODE (trueop1
) == CONST_INT
2192 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2193 == GET_MODE_MASK (mode
)))
2194 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2195 if (rtx_equal_p (trueop0
, trueop1
)
2196 && ! side_effects_p (op0
)
2197 && GET_MODE_CLASS (mode
) != MODE_CC
)
2198 return CONST0_RTX (mode
);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
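      /* The PLUS canonicalization is valid because adding the sign bit
         cannot carry into any lower bit.  For instance, in an 8-bit mode
         the sign bit constant is 0x80 and, modulo 256, x ^ 0x80 == x + 0x80
         for every x (example values are purely illustrative).  */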
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
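      /* Example (illustrative): if nonzero_bits shows OP0 can only have bits
         within 0x00ff and OP1 only bits within 0xff00, then no bit position
         can receive two ones, so (xor OP0 OP1) and (ior OP0 OP1) compute the
         same value; IOR is the canonical choice here.  */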
2225 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2226 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2229 int num_negated
= 0;
2231 if (GET_CODE (op0
) == NOT
)
2232 num_negated
++, op0
= XEXP (op0
, 0);
2233 if (GET_CODE (op1
) == NOT
)
2234 num_negated
++, op1
= XEXP (op1
, 0);
2236 if (num_negated
== 2)
2237 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2238 else if (num_negated
== 1)
2239 return simplify_gen_unary (NOT
, mode
,
2240 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2244 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2245 correspond to a machine insn or result in further simplifications
2246 if B is a constant. */
2248 if (GET_CODE (op0
) == AND
2249 && rtx_equal_p (XEXP (op0
, 1), op1
)
2250 && ! side_effects_p (op1
))
2251 return simplify_gen_binary (AND
, mode
,
2252 simplify_gen_unary (NOT
, mode
,
2253 XEXP (op0
, 0), mode
),
2256 else if (GET_CODE (op0
) == AND
2257 && rtx_equal_p (XEXP (op0
, 0), op1
)
2258 && ! side_effects_p (op1
))
2259 return simplify_gen_binary (AND
, mode
,
2260 simplify_gen_unary (NOT
, mode
,
2261 XEXP (op0
, 1), mode
),
2264 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2265 comparison if STORE_FLAG_VALUE is 1. */
2266 if (STORE_FLAG_VALUE
== 1
2267 && trueop1
== const1_rtx
2268 && COMPARISON_P (op0
)
2269 && (reversed
= reversed_comparison (op0
, mode
)))
2272 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2273 is (lt foo (const_int 0)), so we can perform the above
2274 simplification if STORE_FLAG_VALUE is 1. */
2276 if (STORE_FLAG_VALUE
== 1
2277 && trueop1
== const1_rtx
2278 && GET_CODE (op0
) == LSHIFTRT
2279 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2280 && INTVAL (XEXP (op0
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
2281 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2283 /* (xor (comparison foo bar) (const_int sign-bit))
2284 when STORE_FLAG_VALUE is the sign bit. */
2285 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2286 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
2287 == (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1))
2288 && trueop1
== const_true_rtx
2289 && COMPARISON_P (op0
)
2290 && (reversed
= reversed_comparison (op0
, mode
)))
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
2311 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2312 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2313 && ! side_effects_p (op0
)
2314 && GET_MODE_CLASS (mode
) != MODE_CC
)
2315 return CONST0_RTX (mode
);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
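      /* For instance (a hypothetical QImode/SImode example):
         (and (sign_extend:SI (reg:QI X)) (const_int 0x7f)) becomes
         (zero_extend:SI (and:QI (reg:QI X) (const_int 0x7f))), which is safe
         because 0x7f has no bits outside QImode's mask, so the copies of the
         sign bit created by the extension are all masked away.  */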
2333 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2334 if (GET_CODE (op0
) == IOR
2335 && GET_CODE (trueop1
) == CONST_INT
2336 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
)
2338 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2339 return simplify_gen_binary (IOR
, mode
,
2340 simplify_gen_binary (AND
, mode
,
2341 XEXP (op0
, 0), op1
),
2342 gen_int_mode (tmp
, mode
));
2345 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2346 insn (and may simplify more). */
2347 if (GET_CODE (op0
) == XOR
2348 && rtx_equal_p (XEXP (op0
, 0), op1
)
2349 && ! side_effects_p (op1
))
2350 return simplify_gen_binary (AND
, mode
,
2351 simplify_gen_unary (NOT
, mode
,
2352 XEXP (op0
, 1), mode
),
2355 if (GET_CODE (op0
) == XOR
2356 && rtx_equal_p (XEXP (op0
, 1), op1
)
2357 && ! side_effects_p (op1
))
2358 return simplify_gen_binary (AND
, mode
,
2359 simplify_gen_unary (NOT
, mode
,
2360 XEXP (op0
, 0), mode
),
2363 /* Similarly for (~(A ^ B)) & A. */
2364 if (GET_CODE (op0
) == NOT
2365 && GET_CODE (XEXP (op0
, 0)) == XOR
2366 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2367 && ! side_effects_p (op1
))
2368 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2370 if (GET_CODE (op0
) == NOT
2371 && GET_CODE (XEXP (op0
, 0)) == XOR
2372 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2373 && ! side_effects_p (op1
))
2374 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2376 /* Convert (A | B) & A to A. */
2377 if (GET_CODE (op0
) == IOR
2378 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2379 || rtx_equal_p (XEXP (op0
, 1), op1
))
2380 && ! side_effects_p (XEXP (op0
, 0))
2381 && ! side_effects_p (XEXP (op0
, 1)))
2384 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2385 ((A & N) + B) & M -> (A + B) & M
2386 Similarly if (N & M) == 0,
2387 ((A | N) + B) & M -> (A + B) & M
2388 and for - instead of + and/or ^ instead of |. */
2389 if (GET_CODE (trueop1
) == CONST_INT
2390 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2391 && ~INTVAL (trueop1
)
2392 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
2393 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2398 pmop
[0] = XEXP (op0
, 0);
2399 pmop
[1] = XEXP (op0
, 1);
2401 for (which
= 0; which
< 2; which
++)
2404 switch (GET_CODE (tem
))
2407 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2408 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
2409 == INTVAL (trueop1
))
2410 pmop
[which
] = XEXP (tem
, 0);
2414 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2415 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
2416 pmop
[which
] = XEXP (tem
, 0);
2423 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2425 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2427 return simplify_gen_binary (code
, mode
, tem
, op1
);
2431 /* (and X (ior (not X) Y) -> (and X Y) */
2432 if (GET_CODE (op1
) == IOR
2433 && GET_CODE (XEXP (op1
, 0)) == NOT
2434 && op0
== XEXP (XEXP (op1
, 0), 0))
2435 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
2437 /* (and (ior (not X) Y) X) -> (and X Y) */
2438 if (GET_CODE (op0
) == IOR
2439 && GET_CODE (XEXP (op0
, 0)) == NOT
2440 && op1
== XEXP (XEXP (op0
, 0), 0))
2441 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
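      /* e.g. (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3)),
         since exact_log2 (8) == 3; with X == 29 both forms yield 3.
         (Example values are illustrative only.)  */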
2466 /* Handle floating point and integers separately. */
2467 if (SCALAR_FLOAT_MODE_P (mode
))
2469 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2470 safe for modes with NaNs, since 0.0 / 0.0 will then be
2471 NaN rather than 0.0. Nor is it safe for modes with signed
2472 zeros, since dividing 0 by a negative number gives -0.0 */
2473 if (trueop0
== CONST0_RTX (mode
)
2474 && !HONOR_NANS (mode
)
2475 && !HONOR_SIGNED_ZEROS (mode
)
2476 && ! side_effects_p (op1
))
2479 if (trueop1
== CONST1_RTX (mode
)
2480 && !HONOR_SNANS (mode
))
2483 if (GET_CODE (trueop1
) == CONST_DOUBLE
2484 && trueop1
!= CONST0_RTX (mode
))
2487 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2490 if (REAL_VALUES_EQUAL (d
, dconstm1
)
2491 && !HONOR_SNANS (mode
))
2492 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2494 /* Change FP division by a constant into multiplication.
2495 Only do this with -freciprocal-math. */
2496 if (flag_reciprocal_math
2497 && !REAL_VALUES_EQUAL (d
, dconst0
))
2499 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
2500 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
2501 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
2507 /* 0/x is 0 (or x&0 if x has side-effects). */
2508 if (trueop0
== CONST0_RTX (mode
))
2510 if (side_effects_p (op1
))
2511 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2515 if (trueop1
== CONST1_RTX (mode
))
2516 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
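      /* e.g. (umod X (const_int 8)) becomes (and X (const_int 7)); with
         X == 29 both forms yield 5.  (Example values are illustrative
         only.)  */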
2549 /* 0%x is 0 (or x&0 if x has side-effects). */
2550 if (trueop0
== CONST0_RTX (mode
))
2552 if (side_effects_p (op1
))
2553 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2556 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2557 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
2559 if (side_effects_p (op0
))
2560 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
2561 return CONST0_RTX (mode
);
2568 if (trueop1
== CONST0_RTX (mode
))
2570 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2572 /* Rotating ~0 always results in ~0. */
2573 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
2574 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2575 && ! side_effects_p (op1
))
2578 if (SHIFT_COUNT_TRUNCATED
&& GET_CODE (op1
) == CONST_INT
)
2580 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
2581 if (val
!= INTVAL (op1
))
2582 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
2589 if (trueop1
== CONST0_RTX (mode
))
2591 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2593 goto canonicalize_shift
;
2596 if (trueop1
== CONST0_RTX (mode
))
2598 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2600 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2601 if (GET_CODE (op0
) == CLZ
2602 && GET_CODE (trueop1
) == CONST_INT
2603 && STORE_FLAG_VALUE
== 1
2604 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
2606 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2607 unsigned HOST_WIDE_INT zero_val
= 0;
2609 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
2610 && zero_val
== GET_MODE_BITSIZE (imode
)
2611 && INTVAL (trueop1
) == exact_log2 (zero_val
))
2612 return simplify_gen_relational (EQ
, mode
, imode
,
2613 XEXP (op0
, 0), const0_rtx
);
2615 goto canonicalize_shift
;
2618 if (width
<= HOST_BITS_PER_WIDE_INT
2619 && GET_CODE (trueop1
) == CONST_INT
2620 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2621 && ! side_effects_p (op0
))
2623 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2625 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2631 if (width
<= HOST_BITS_PER_WIDE_INT
2632 && GET_CODE (trueop1
) == CONST_INT
2633 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2634 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2635 && ! side_effects_p (op0
))
2637 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2639 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2645 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2647 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2649 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2655 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2657 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2659 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2672 /* ??? There are simplifications that can be done. */
2676 if (!VECTOR_MODE_P (mode
))
2678 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2679 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2680 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2681 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2682 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2684 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2685 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2688 /* Extract a scalar element from a nested VEC_SELECT expression
2689 (with optional nested VEC_CONCAT expression). Some targets
2690 (i386) extract scalar element from a vector using chain of
2691 nested VEC_SELECT expressions. When input operand is a memory
2692 operand, this operation can be simplified to a simple scalar
2693 load from an offseted memory address. */
2694 if (GET_CODE (trueop0
) == VEC_SELECT
)
2696 rtx op0
= XEXP (trueop0
, 0);
2697 rtx op1
= XEXP (trueop0
, 1);
2699 enum machine_mode opmode
= GET_MODE (op0
);
2700 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
2701 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
2703 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
2709 gcc_assert (GET_CODE (op1
) == PARALLEL
);
2710 gcc_assert (i
< n_elts
);
2712 /* Select element, pointed by nested selector. */
2713 elem
= INTVAL (XVECEXP (op1
, 0, i
));
2715 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2716 if (GET_CODE (op0
) == VEC_CONCAT
)
2718 rtx op00
= XEXP (op0
, 0);
2719 rtx op01
= XEXP (op0
, 1);
2721 enum machine_mode mode00
, mode01
;
2722 int n_elts00
, n_elts01
;
2724 mode00
= GET_MODE (op00
);
2725 mode01
= GET_MODE (op01
);
2727 /* Find out number of elements of each operand. */
2728 if (VECTOR_MODE_P (mode00
))
2730 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
2731 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
2736 if (VECTOR_MODE_P (mode01
))
2738 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
2739 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
2744 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
2746 /* Select correct operand of VEC_CONCAT
2747 and adjust selector. */
2748 if (elem
< n_elts01
)
2759 vec
= rtvec_alloc (1);
2760 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
2762 tmp
= gen_rtx_fmt_ee (code
, mode
,
2763 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
2769 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2770 gcc_assert (GET_MODE_INNER (mode
)
2771 == GET_MODE_INNER (GET_MODE (trueop0
)));
2772 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2774 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2776 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2777 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2778 rtvec v
= rtvec_alloc (n_elts
);
2781 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2782 for (i
= 0; i
< n_elts
; i
++)
2784 rtx x
= XVECEXP (trueop1
, 0, i
);
2786 gcc_assert (GET_CODE (x
) == CONST_INT
);
2787 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2791 return gen_rtx_CONST_VECTOR (mode
, v
);
2795 if (XVECLEN (trueop1
, 0) == 1
2796 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2797 && GET_CODE (trueop0
) == VEC_CONCAT
)
2800 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2802 /* Try to find the element in the VEC_CONCAT. */
2803 while (GET_MODE (vec
) != mode
2804 && GET_CODE (vec
) == VEC_CONCAT
)
2806 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2807 if (offset
< vec_size
)
2808 vec
= XEXP (vec
, 0);
2812 vec
= XEXP (vec
, 1);
2814 vec
= avoid_constant_pool_reference (vec
);
2817 if (GET_MODE (vec
) == mode
)
2824 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2825 ? GET_MODE (trueop0
)
2826 : GET_MODE_INNER (mode
));
2827 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2828 ? GET_MODE (trueop1
)
2829 : GET_MODE_INNER (mode
));
2831 gcc_assert (VECTOR_MODE_P (mode
));
2832 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2833 == GET_MODE_SIZE (mode
));
2835 if (VECTOR_MODE_P (op0_mode
))
2836 gcc_assert (GET_MODE_INNER (mode
)
2837 == GET_MODE_INNER (op0_mode
));
2839 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2841 if (VECTOR_MODE_P (op1_mode
))
2842 gcc_assert (GET_MODE_INNER (mode
)
2843 == GET_MODE_INNER (op1_mode
));
2845 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2847 if ((GET_CODE (trueop0
) == CONST_VECTOR
2848 || GET_CODE (trueop0
) == CONST_INT
2849 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2850 && (GET_CODE (trueop1
) == CONST_VECTOR
2851 || GET_CODE (trueop1
) == CONST_INT
2852 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2854 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2855 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2856 rtvec v
= rtvec_alloc (n_elts
);
2858 unsigned in_n_elts
= 1;
2860 if (VECTOR_MODE_P (op0_mode
))
2861 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2862 for (i
= 0; i
< n_elts
; i
++)
2866 if (!VECTOR_MODE_P (op0_mode
))
2867 RTVEC_ELT (v
, i
) = trueop0
;
2869 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2873 if (!VECTOR_MODE_P (op1_mode
))
2874 RTVEC_ELT (v
, i
) = trueop1
;
2876 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2881 return gen_rtx_CONST_VECTOR (mode
, v
);
2894 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2897 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2899 unsigned int width
= GET_MODE_BITSIZE (mode
);
2901 if (VECTOR_MODE_P (mode
)
2902 && code
!= VEC_CONCAT
2903 && GET_CODE (op0
) == CONST_VECTOR
2904 && GET_CODE (op1
) == CONST_VECTOR
)
2906 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2907 enum machine_mode op0mode
= GET_MODE (op0
);
2908 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2909 enum machine_mode op1mode
= GET_MODE (op1
);
2910 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2911 rtvec v
= rtvec_alloc (n_elts
);
2914 gcc_assert (op0_n_elts
== n_elts
);
2915 gcc_assert (op1_n_elts
== n_elts
);
2916 for (i
= 0; i
< n_elts
; i
++)
2918 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2919 CONST_VECTOR_ELT (op0
, i
),
2920 CONST_VECTOR_ELT (op1
, i
));
2923 RTVEC_ELT (v
, i
) = x
;
2926 return gen_rtx_CONST_VECTOR (mode
, v
);
2929 if (VECTOR_MODE_P (mode
)
2930 && code
== VEC_CONCAT
2931 && (CONST_INT_P (op0
)
2932 || GET_CODE (op0
) == CONST_DOUBLE
2933 || GET_CODE (op0
) == CONST_FIXED
)
2934 && (CONST_INT_P (op1
)
2935 || GET_CODE (op1
) == CONST_DOUBLE
2936 || GET_CODE (op1
) == CONST_FIXED
))
2938 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2939 rtvec v
= rtvec_alloc (n_elts
);
2941 gcc_assert (n_elts
>= 2);
2944 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2945 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2947 RTVEC_ELT (v
, 0) = op0
;
2948 RTVEC_ELT (v
, 1) = op1
;
2952 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2953 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2956 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2957 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2958 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2960 for (i
= 0; i
< op0_n_elts
; ++i
)
2961 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2962 for (i
= 0; i
< op1_n_elts
; ++i
)
2963 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2966 return gen_rtx_CONST_VECTOR (mode
, v
);
2969 if (SCALAR_FLOAT_MODE_P (mode
)
2970 && GET_CODE (op0
) == CONST_DOUBLE
2971 && GET_CODE (op1
) == CONST_DOUBLE
2972 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2983 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2985 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2987 for (i
= 0; i
< 4; i
++)
3004 real_from_target (&r
, tmp0
, mode
);
3005 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3009 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3012 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3013 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3014 real_convert (&f0
, mode
, &f0
);
3015 real_convert (&f1
, mode
, &f1
);
3017 if (HONOR_SNANS (mode
)
3018 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3022 && REAL_VALUES_EQUAL (f1
, dconst0
)
3023 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3026 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3027 && flag_trapping_math
3028 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3030 int s0
= REAL_VALUE_NEGATIVE (f0
);
3031 int s1
= REAL_VALUE_NEGATIVE (f1
);
3036 /* Inf + -Inf = NaN plus exception. */
3041 /* Inf - Inf = NaN plus exception. */
3046 /* Inf / Inf = NaN plus exception. */
3053 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3054 && flag_trapping_math
3055 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3056 || (REAL_VALUE_ISINF (f1
)
3057 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3058 /* Inf * 0 = NaN plus exception. */
3061 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3063 real_convert (&result
, mode
, &value
);
3065 /* Don't constant fold this floating point operation if
3066 the result has overflowed and flag_trapping_math. */
3068 if (flag_trapping_math
3069 && MODE_HAS_INFINITIES (mode
)
3070 && REAL_VALUE_ISINF (result
)
3071 && !REAL_VALUE_ISINF (f0
)
3072 && !REAL_VALUE_ISINF (f1
))
3073 /* Overflow plus exception. */
3076 /* Don't constant fold this floating point operation if the
3077 result may dependent upon the run-time rounding mode and
3078 flag_rounding_math is set, or if GCC's software emulation
3079 is unable to accurately represent the result. */
3081 if ((flag_rounding_math
3082 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3083 && (inexact
|| !real_identical (&result
, &value
)))
3086 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3090 /* We can fold some multi-word operations. */
3091 if (GET_MODE_CLASS (mode
) == MODE_INT
3092 && width
== HOST_BITS_PER_WIDE_INT
* 2
3093 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
3094 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
3096 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
3097 HOST_WIDE_INT h1
, h2
, hv
, ht
;
3099 if (GET_CODE (op0
) == CONST_DOUBLE
)
3100 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
3102 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
3104 if (GET_CODE (op1
) == CONST_DOUBLE
)
3105 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
3107 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
3112 /* A - B == A + (-B). */
3113 neg_double (l2
, h2
, &lv
, &hv
);
3116 /* Fall through.... */
3119 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3123 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3127 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3128 &lv
, &hv
, <
, &ht
))
3133 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3134 <
, &ht
, &lv
, &hv
))
3139 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3140 &lv
, &hv
, <
, &ht
))
3145 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3146 <
, &ht
, &lv
, &hv
))
3151 lv
= l1
& l2
, hv
= h1
& h2
;
3155 lv
= l1
| l2
, hv
= h1
| h2
;
3159 lv
= l1
^ l2
, hv
= h1
^ h2
;
3165 && ((unsigned HOST_WIDE_INT
) l1
3166 < (unsigned HOST_WIDE_INT
) l2
)))
3175 && ((unsigned HOST_WIDE_INT
) l1
3176 > (unsigned HOST_WIDE_INT
) l2
)))
3183 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
3185 && ((unsigned HOST_WIDE_INT
) l1
3186 < (unsigned HOST_WIDE_INT
) l2
)))
3193 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
3195 && ((unsigned HOST_WIDE_INT
) l1
3196 > (unsigned HOST_WIDE_INT
) l2
)))
3202 case LSHIFTRT
: case ASHIFTRT
:
3204 case ROTATE
: case ROTATERT
:
3205 if (SHIFT_COUNT_TRUNCATED
)
3206 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
3208 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
3211 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3212 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
3214 else if (code
== ASHIFT
)
3215 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
3216 else if (code
== ROTATE
)
3217 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3218 else /* code == ROTATERT */
3219 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3226 return immed_double_const (lv
, hv
, mode
);
3229 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3230 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

          arg0s = arg0;
          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg0s |= ((HOST_WIDE_INT) (-1) << width);

          arg1s = arg1;
          if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }
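      /* For example (illustrative): with width == 8 and op0 == (const_int -4),
         the zero-extended form is arg0 == 0xfc (252) while the sign-extended
         form is arg0s == -4.  Signed operations such as DIV use the *s forms;
         unsigned ones such as UDIV use the zero-extended forms.  */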
3257 /* Compute the value of the arithmetic. */
3262 val
= arg0s
+ arg1s
;
3266 val
= arg0s
- arg1s
;
3270 val
= arg0s
* arg1s
;
3275 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3278 val
= arg0s
/ arg1s
;
3283 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3286 val
= arg0s
% arg1s
;
3291 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3294 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3299 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3302 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((HOST_WIDE_INT) -1) << (width - arg1);
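          /* Worked example (illustrative values): with width == 8,
             arg0 == 0xfc (arg0s == -4) and arg1 == 1, the unsigned shift
             yields 0x7e; because code == ASHIFTRT and arg0s < 0, the |=
             above ORs the sign bits back in, giving 0xfe in the low eight
             bits, i.e. -2, which matches -4 >> 1 done arithmetically.  */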
3346 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3347 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3355 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3356 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3360 /* Do nothing here. */
3364 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3368 val
= ((unsigned HOST_WIDE_INT
) arg0
3369 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3373 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3377 val
= ((unsigned HOST_WIDE_INT
) arg0
3378 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3391 /* ??? There are simplifications that can be done. */
3398 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
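/* For instance (a purely illustrative input), simplifying
   (minus (plus a b) (plus (plus a c) b)) first flattens the operands into
   the list a, b, -a, -c, -b; the pairwise pass then cancels a against -a
   and b against -b, and the rebuilt result is (neg c).  */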
3413 struct simplify_plus_minus_op_data
3420 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
3424 result
= (commutative_operand_precedence (y
)
3425 - commutative_operand_precedence (x
));
3429 /* Group together equal REGs to do more simplification. */
3430 if (REG_P (x
) && REG_P (y
))
3431 return REGNO (x
) > REGNO (y
);
3437 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3440 struct simplify_plus_minus_op_data ops
[8];
3442 int n_ops
= 2, input_ops
= 2;
3443 int changed
, n_constants
= 0, canonicalized
= 0;
3446 memset (ops
, 0, sizeof ops
);
3448 /* Set up the two operands and then expand them until nothing has been
3449 changed. If we run out of room in our array, give up; this should
3450 almost never happen. */
3455 ops
[1].neg
= (code
== MINUS
);
3461 for (i
= 0; i
< n_ops
; i
++)
3463 rtx this_op
= ops
[i
].op
;
3464 int this_neg
= ops
[i
].neg
;
3465 enum rtx_code this_code
= GET_CODE (this_op
);
3474 ops
[n_ops
].op
= XEXP (this_op
, 1);
3475 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3478 ops
[i
].op
= XEXP (this_op
, 0);
3481 canonicalized
|= this_neg
;
3485 ops
[i
].op
= XEXP (this_op
, 0);
3486 ops
[i
].neg
= ! this_neg
;
3493 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3494 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3495 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3497 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3498 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3499 ops
[n_ops
].neg
= this_neg
;
3507 /* ~a -> (-a - 1) */
3510 ops
[n_ops
].op
= constm1_rtx
;
3511 ops
[n_ops
++].neg
= this_neg
;
3512 ops
[i
].op
= XEXP (this_op
, 0);
3513 ops
[i
].neg
= !this_neg
;
3523 ops
[i
].op
= neg_const_int (mode
, this_op
);
3537 if (n_constants
> 1)
3540 gcc_assert (n_ops
>= 2);
3542 /* If we only have two operands, we can avoid the loops. */
3545 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3548 /* Get the two operands. Be careful with the order, especially for
3549 the cases where code == MINUS. */
3550 if (ops
[0].neg
&& ops
[1].neg
)
3552 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3555 else if (ops
[0].neg
)
3566 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3569 /* Now simplify each pair of operands until nothing changes. */
3572 /* Insertion sort is good enough for an eight-element array. */
3573 for (i
= 1; i
< n_ops
; i
++)
3575 struct simplify_plus_minus_op_data save
;
3577 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
3583 ops
[j
+ 1] = ops
[j
];
3584 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
3588 /* This is only useful the first time through. */
3593 for (i
= n_ops
- 1; i
> 0; i
--)
3594 for (j
= i
- 1; j
>= 0; j
--)
3596 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3597 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3599 if (lhs
!= 0 && rhs
!= 0)
3601 enum rtx_code ncode
= PLUS
;
3607 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3609 else if (swap_commutative_operands_p (lhs
, rhs
))
3610 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3612 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3613 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3615 rtx tem_lhs
, tem_rhs
;
3617 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3618 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3619 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3621 if (tem
&& !CONSTANT_P (tem
))
3622 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3625 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3627 /* Reject "simplifications" that just wrap the two
3628 arguments in a CONST. Failure to do so can result
3629 in infinite recursion with simplify_binary_operation
3630 when it calls us to simplify CONST operations. */
3632 && ! (GET_CODE (tem
) == CONST
3633 && GET_CODE (XEXP (tem
, 0)) == ncode
3634 && XEXP (XEXP (tem
, 0), 0) == lhs
3635 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3638 if (GET_CODE (tem
) == NEG
)
3639 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3640 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3641 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3645 ops
[j
].op
= NULL_RTX
;
3651 /* Pack all the operands to the lower-numbered entries. */
3652 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3662 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3664 && GET_CODE (ops
[1].op
) == CONST_INT
3665 && CONSTANT_P (ops
[0].op
)
3667 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3669 /* We suppressed creation of trivial CONST expressions in the
3670 combination loop to avoid recursion. Create one manually now.
3671 The combination loop should have ensured that there is exactly
3672 one CONST_INT, and the sort will have ensured that it is last
3673 in the array and that any other constant will be next-to-last. */
3675 if (GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
)
3683 && CONSTANT_P (ops
[i
].op
)
3684 && GET_CODE (ops
[i
].op
) == GET_CODE (ops
[i
- 1].op
))
3686 ops
[i
- 1].op
= gen_rtx_MINUS (mode
, ops
[i
- 1].op
, ops
[i
].op
);
3687 ops
[i
- 1].op
= gen_rtx_CONST (mode
, ops
[i
- 1].op
);
3689 ops
[i
] = ops
[i
+ 1];
3694 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3695 && CONSTANT_P (ops
[n_ops
- 2].op
))
3697 rtx value
= ops
[n_ops
- 1].op
;
3698 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3699 value
= neg_const_int (mode
, value
);
3700 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3704 /* Put a non-negated operand first, if possible. */
3706 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3709 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3718 /* Now make the result by performing the requested operations. */
3720 for (i
= 1; i
< n_ops
; i
++)
3721 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3722 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
3748 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3749 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3751 rtx tem
, trueop0
, trueop1
;
3753 if (cmp_mode
== VOIDmode
)
3754 cmp_mode
= GET_MODE (op0
);
3755 if (cmp_mode
== VOIDmode
)
3756 cmp_mode
= GET_MODE (op1
);
3758 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3761 if (SCALAR_FLOAT_MODE_P (mode
))
3763 if (tem
== const0_rtx
)
3764 return CONST0_RTX (mode
);
3765 #ifdef FLOAT_STORE_FLAG_VALUE
3767 REAL_VALUE_TYPE val
;
3768 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3769 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3775 if (VECTOR_MODE_P (mode
))
3777 if (tem
== const0_rtx
)
3778 return CONST0_RTX (mode
);
3779 #ifdef VECTOR_STORE_FLAG_VALUE
3784 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3785 if (val
== NULL_RTX
)
3787 if (val
== const1_rtx
)
3788 return CONST1_RTX (mode
);
3790 units
= GET_MODE_NUNITS (mode
);
3791 v
= rtvec_alloc (units
);
3792 for (i
= 0; i
< units
; i
++)
3793 RTVEC_ELT (v
, i
) = val
;
3794 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3804 /* For the following tests, ensure const0_rtx is op1. */
3805 if (swap_commutative_operands_p (op0
, op1
)
3806 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3807 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3809 /* If op0 is a compare, extract the comparison arguments from it. */
3810 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3811 return simplify_relational_operation (code
, mode
, VOIDmode
,
3812 XEXP (op0
, 0), XEXP (op0
, 1));
3814 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3818 trueop0
= avoid_constant_pool_reference (op0
);
3819 trueop1
= avoid_constant_pool_reference (op1
);
3820 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3824 /* This part of simplify_relational_operation is only used when CMP_MODE
3825 is not in class MODE_CC (i.e. it is a real comparison).
   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
3831 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3832 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3834 enum rtx_code op0code
= GET_CODE (op0
);
3836 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
3838 /* If op0 is a comparison, extract the comparison arguments
3842 if (GET_MODE (op0
) == mode
)
3843 return simplify_rtx (op0
);
3845 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3846 XEXP (op0
, 0), XEXP (op0
, 1));
3848 else if (code
== EQ
)
3850 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3851 if (new_code
!= UNKNOWN
)
3852 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3853 XEXP (op0
, 0), XEXP (op0
, 1));
3857 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3858 if ((code
== LTU
|| code
== GEU
)
3859 && GET_CODE (op0
) == PLUS
3860 && rtx_equal_p (op1
, XEXP (op0
, 1))
3861 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3862 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
3863 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
, XEXP (op0
, 0));
3865 if (op1
== const0_rtx
)
3867 /* Canonicalize (GTU x 0) as (NE x 0). */
3869 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
3870 /* Canonicalize (LEU x 0) as (EQ x 0). */
3872 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
3874 else if (op1
== const1_rtx
)
3879 /* Canonicalize (GE x 1) as (GT x 0). */
3880 return simplify_gen_relational (GT
, mode
, cmp_mode
,
3883 /* Canonicalize (GEU x 1) as (NE x 0). */
3884 return simplify_gen_relational (NE
, mode
, cmp_mode
,
3887 /* Canonicalize (LT x 1) as (LE x 0). */
3888 return simplify_gen_relational (LE
, mode
, cmp_mode
,
3891 /* Canonicalize (LTU x 1) as (EQ x 0). */
3892 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
3898 else if (op1
== constm1_rtx
)
3900 /* Canonicalize (LE x -1) as (LT x 0). */
3902 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
3903 /* Canonicalize (GT x -1) as (GE x 0). */
3905 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
3908 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3909 if ((code
== EQ
|| code
== NE
)
3910 && (op0code
== PLUS
|| op0code
== MINUS
)
3912 && CONSTANT_P (XEXP (op0
, 1))
3913 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3915 rtx x
= XEXP (op0
, 0);
3916 rtx c
= XEXP (op0
, 1);
3918 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3920 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3923 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3924 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3926 && op1
== const0_rtx
3927 && GET_MODE_CLASS (mode
) == MODE_INT
3928 && cmp_mode
!= VOIDmode
3929 /* ??? Work-around BImode bugs in the ia64 backend. */
3931 && cmp_mode
!= BImode
3932 && nonzero_bits (op0
, cmp_mode
) == 1
3933 && STORE_FLAG_VALUE
== 1)
3934 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3935 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3936 : lowpart_subreg (mode
, op0
, cmp_mode
);
3938 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3939 if ((code
== EQ
|| code
== NE
)
3940 && op1
== const0_rtx
3942 return simplify_gen_relational (code
, mode
, cmp_mode
,
3943 XEXP (op0
, 0), XEXP (op0
, 1));
3945 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3946 if ((code
== EQ
|| code
== NE
)
3948 && rtx_equal_p (XEXP (op0
, 0), op1
)
3949 && !side_effects_p (XEXP (op0
, 0)))
3950 return simplify_gen_relational (code
, mode
, cmp_mode
,
3951 XEXP (op0
, 1), const0_rtx
);
3953 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3954 if ((code
== EQ
|| code
== NE
)
3956 && rtx_equal_p (XEXP (op0
, 1), op1
)
3957 && !side_effects_p (XEXP (op0
, 1)))
3958 return simplify_gen_relational (code
, mode
, cmp_mode
,
3959 XEXP (op0
, 0), const0_rtx
);
3961 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3962 if ((code
== EQ
|| code
== NE
)
3964 && (GET_CODE (op1
) == CONST_INT
3965 || GET_CODE (op1
) == CONST_DOUBLE
)
3966 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3967 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3968 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3969 simplify_gen_binary (XOR
, cmp_mode
,
3970 XEXP (op0
, 1), op1
));
3972 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
3978 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3979 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
3980 XEXP (op0
, 0), const0_rtx
);
3985 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3986 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
3987 XEXP (op0
, 0), const0_rtx
);
4006 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4007 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4008 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4009 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4010 For floating-point comparisons, assume that the operands were ordered. */
4013 comparison_result (enum rtx_code code
, int known_results
)
4019 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4022 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4026 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4029 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4033 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4036 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4039 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4041 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4044 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4046 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4049 return const_true_rtx
;
4057 /* Check if the given comparison (done in the given MODE) is actually a
4058 tautology or a contradiction.
4059 If no simplification is possible, this function returns zero.
4060 Otherwise, it returns either const_true_rtx or const0_rtx. */
4063 simplify_const_relational_operation (enum rtx_code code
,
4064 enum machine_mode mode
,
4071 gcc_assert (mode
!= VOIDmode
4072 || (GET_MODE (op0
) == VOIDmode
4073 && GET_MODE (op1
) == VOIDmode
));
4075 /* If op0 is a compare, extract the comparison arguments from it. */
4076 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4078 op1
= XEXP (op0
, 1);
4079 op0
= XEXP (op0
, 0);
4081 if (GET_MODE (op0
) != VOIDmode
)
4082 mode
= GET_MODE (op0
);
4083 else if (GET_MODE (op1
) != VOIDmode
)
4084 mode
= GET_MODE (op1
);
4089 /* We can't simplify MODE_CC values since we don't know what the
4090 actual comparison is. */
4091 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4094 /* Make sure the constant is second. */
4095 if (swap_commutative_operands_p (op0
, op1
))
4097 tem
= op0
, op0
= op1
, op1
= tem
;
4098 code
= swap_condition (code
);
4101 trueop0
= avoid_constant_pool_reference (op0
);
4102 trueop1
= avoid_constant_pool_reference (op1
);
4104 /* For integer comparisons of A and B maybe we can simplify A - B and can
4105 then simplify a comparison of that with zero. If A and B are both either
4106 a register or a CONST_INT, this can't help; testing for these cases will
4107 prevent infinite recursion here and speed things up.
4109 We can only do this for EQ and NE comparisons as otherwise we may
4110 lose or introduce overflow which we cannot disregard as undefined as
4111 we do not know the signedness of the operation on either the left or
4112 the right hand side of the comparison. */
4114 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4115 && (code
== EQ
|| code
== NE
)
4116 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
4117 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
4118 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4119 /* We cannot do this if tem is a nonzero address. */
4120 && ! nonzero_address_p (tem
))
4121 return simplify_const_relational_operation (signed_condition (code
),
4122 mode
, tem
, const0_rtx
);
4124 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4125 return const_true_rtx
;
4127 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4130 /* For modes without NaNs, if the two operands are equal, we know the
4131 result except if they have side-effects. Even with NaNs we know
4132 the result of unordered comparisons and, if signaling NaNs are
4133 irrelevant, also the result of LT/GT/LTGT. */
4134 if ((! HONOR_NANS (GET_MODE (trueop0
))
4135 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4136 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4137 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4138 && rtx_equal_p (trueop0
, trueop1
)
4139 && ! side_effects_p (trueop0
))
4140 return comparison_result (code
, CMP_EQ
);
4142 /* If the operands are floating-point constants, see if we can fold
4144 if (GET_CODE (trueop0
) == CONST_DOUBLE
4145 && GET_CODE (trueop1
) == CONST_DOUBLE
4146 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4148 REAL_VALUE_TYPE d0
, d1
;
4150 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4151 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4153 /* Comparisons are unordered iff at least one of the values is NaN. */
4154 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4164 return const_true_rtx
;
4177 return comparison_result (code
,
4178 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4179 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4182 /* Otherwise, see if the operands are both integers. */
4183 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4184 && (GET_CODE (trueop0
) == CONST_DOUBLE
4185 || GET_CODE (trueop0
) == CONST_INT
)
4186 && (GET_CODE (trueop1
) == CONST_DOUBLE
4187 || GET_CODE (trueop1
) == CONST_INT
))
4189 int width
= GET_MODE_BITSIZE (mode
);
4190 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4191 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4193 /* Get the two words comprising each integer constant. */
4194 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
4196 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4197 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4201 l0u
= l0s
= INTVAL (trueop0
);
4202 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4205 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4207 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4208 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4212 l1u
= l1s
= INTVAL (trueop1
);
4213 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4216 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4217 we have to sign or zero-extend the values. */
4218 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4220 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4221 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4223 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4224 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4226 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4227 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4229 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4230 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4232 if (h0u
== h1u
&& l0u
== l1u
)
4233 return comparison_result (code
, CMP_EQ
);
4237 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4238 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4239 return comparison_result (code
, cr
);
4243 /* Optimize comparisons with upper and lower bounds. */
4244 if (SCALAR_INT_MODE_P (mode
)
4245 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
4246 && GET_CODE (trueop1
) == CONST_INT
)
4249 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4250 HOST_WIDE_INT val
= INTVAL (trueop1
);
4251 HOST_WIDE_INT mmin
, mmax
;
4261 /* Get a reduced range if the sign bit is zero. */
4262 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4269 rtx mmin_rtx
, mmax_rtx
;
4270 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4272 mmin
= INTVAL (mmin_rtx
);
4273 mmax
= INTVAL (mmax_rtx
);
4276 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4278 mmin
>>= (sign_copies
- 1);
4279 mmax
>>= (sign_copies
- 1);
4285 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4287 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4288 return const_true_rtx
;
4289 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4294 return const_true_rtx
;
4299 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4301 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4302 return const_true_rtx
;
4303 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4308 return const_true_rtx
;
4314 /* x == y is always false for y out of range. */
4315 if (val
< mmin
|| val
> mmax
)
4319 /* x > y is always false for y >= mmax, always true for y < mmin. */
4321 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4323 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4324 return const_true_rtx
;
4330 return const_true_rtx
;
4333 /* x < y is always false for y <= mmin, always true for y > mmax. */
4335 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4337 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4338 return const_true_rtx
;
4344 return const_true_rtx
;
4348 /* x != y is always true for y out of range. */
4349 if (val
< mmin
|| val
> mmax
)
4350 return const_true_rtx
;
4358 /* Optimize integer comparisons with zero. */
4359 if (trueop1
== const0_rtx
)
4361 /* Some addresses are known to be nonzero. We don't know
4362 their sign, but equality comparisons are known. */
4363 if (nonzero_address_p (trueop0
))
4365 if (code
== EQ
|| code
== LEU
)
4367 if (code
== NE
|| code
== GTU
)
4368 return const_true_rtx
;
4371 /* See if the first operand is an IOR with a constant. If so, we
4372 may be able to determine the result of this comparison. */
4373 if (GET_CODE (op0
) == IOR
)
4375 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4376 if (GET_CODE (inner_const
) == CONST_INT
&& inner_const
!= const0_rtx
)
4378 int sign_bitnum
= GET_MODE_BITSIZE (mode
) - 1;
4379 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4380 && (INTVAL (inner_const
)
4381 & ((HOST_WIDE_INT
) 1 << sign_bitnum
)));
4390 return const_true_rtx
;
4394 return const_true_rtx
;
4408 /* Optimize comparison of ABS with zero. */
4409 if (trueop1
== CONST0_RTX (mode
)
4410 && (GET_CODE (trueop0
) == ABS
4411 || (GET_CODE (trueop0
) == FLOAT_EXTEND
4412 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
4417 /* Optimize abs(x) < 0.0. */
4418 if (!HONOR_SNANS (mode
)
4419 && (!INTEGRAL_MODE_P (mode
)
4420 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4422 if (INTEGRAL_MODE_P (mode
)
4423 && (issue_strict_overflow_warning
4424 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4425 warning (OPT_Wstrict_overflow
,
4426 ("assuming signed overflow does not occur when "
4427 "assuming abs (x) < 0 is false"));
4433 /* Optimize abs(x) >= 0.0. */
4434 if (!HONOR_NANS (mode
)
4435 && (!INTEGRAL_MODE_P (mode
)
4436 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4438 if (INTEGRAL_MODE_P (mode
)
4439 && (issue_strict_overflow_warning
4440 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4441 warning (OPT_Wstrict_overflow
,
4442 ("assuming signed overflow does not occur when "
4443 "assuming abs (x) >= 0 is true"));
4444 return const_true_rtx
;
4449 /* Optimize ! (abs(x) < 0.0). */
4450 return const_true_rtx
;
4460 /* Simplify CODE, an operation with result mode MODE and three operands,
4461 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4462 a constant. Return 0 if no simplifications is possible. */
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
    width = HOST_BITS_PER_WIDE_INT;

      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
          /* Extracting a bit-field from a constant */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return gen_int_mode (val, mode);
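
      /* A worked example, assuming BITS_BIG_ENDIAN is zero and 32-bit SImode:
         (sign_extract:SI (const_int 171) (const_int 4) (const_int 0))
         extracts the low four bits 0b1011; the extracted sign bit is set,
         so the result sign-extends to (const_int -5).  */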
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                  tmp = reversed_comparison_code (op0, NULL_RTX);

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),

          /* See if any simplifications were possible.  */
          if (GET_CODE (temp) == CONST_INT)
            return temp == const0_rtx ? op2 : op1;

          return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
          if ((INTVAL (op2) & mask) == mask)

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
              rtvec v = rtvec_alloc (n_elts);

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
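
      /* For example, merging two constant V4SI vectors under the selector
         (const_int 5) (binary 0101) takes elements 0 and 2 from OP0 and
         elements 1 and 3 from OP1, producing a new CONST_VECTOR.  */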
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
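
/* As an illustration, assuming a little-endian target with 8-bit units and
   4-byte SImode: (subreg:HI (const_int 0x12345678) 0) unpacks the constant
   into the byte array {0x78, 0x56, 0x34, 0x12}, selects the two bytes
   starting at offset 0, and repacks them into (const_int 0x5678).  */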
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
  /* We support up to 512-bit values (for V8DFmode).  */
    value_mask = (1 << value_bit) - 1
  unsigned char value[max_bitsize / value_bit];
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
      elem_bitsize = max_bitsize;

  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;

      switch (GET_CODE (el))
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;

          if (GET_MODE (el) == VOIDmode)
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                  = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)

            long tmp[max_bitsize / 32];
            int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

            gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
            gcc_assert (bitsize <= elem_bitsize);
            gcc_assert (bitsize % value_bit == 0);

            real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),

            /* real_to_target produces its result in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < bitsize; i += value_bit)
                if (WORDS_BIG_ENDIAN)
                  ibase = bitsize - 1 - i;
                *vp++ = tmp[ibase / 32] >> i % 32;

            /* It shouldn't matter what's done here, so fill it with
               zero.  */
            for (; i < elem_bitsize; i += value_bit)

          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
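
  /* A hypothetical example: on a big-endian target with UNITS_PER_WORD of 4,
     a SUBREG:HI of an SImode constant with BYTE 0 names the most-significant
     half, so BYTE is renumbered to 2, its offset within the little-endian
     VALUE array.  */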
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
      outer_submode = outermode;

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      switch (outer_class)
        case MODE_PARTIAL_INT:
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
        case MODE_DECIMAL_FLOAT:
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
            for (i = 0; i < elem_bitsize; i += value_bit)
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
            f.mode = outer_submode;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
              f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  An irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
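
      /* A simple illustration on a little-endian target: for
         (subreg:QI (subreg:HI (reg:SI r) 0) 1) the offsets just add,
         final_offset = 1 + 0, and the nested SUBREG collapses to
         (subreg:QI (reg:SI r) 1).  */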
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))

          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,

      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              && REGNO (op) != ARG_POINTER_REGNUM
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
      unsigned int part_size, final_offset;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
          part = XEXP (op, 0);
          final_offset = byte;
          part = XEXP (op, 1);
          final_offset = byte - part_size;
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
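
  /* For instance, with SCmode represented as (concat:SC R:SF I:SF) and
     4-byte SFmode, (subreg:SF (concat:SC R I) 4) selects the second part:
     BYTE 4 is not below PART_SIZE 4, so PART is the imaginary half,
     FINAL_OFFSET becomes 0, and the result is I itself.  */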
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
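
      /* Concretely, for little-endian lowpart offsets and integer modes:
         (subreg:HI (zero_extend:SI (reg:HI x)) 0) becomes (reg:HI x),
         (subreg:QI (zero_extend:SI (reg:HI x)) 0) becomes
         (subreg:QI (reg:HI x) 0), and
         (subreg:SI (zero_extend:DI (reg:HI x)) 0) becomes
         (zero_extend:SI (reg:HI x)).  */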
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
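
      /* For example, (subreg:SI (zero_extend:DI (reg:HI x)) 4) on a
         little-endian target reads bits 32..63 of the zero-extended value,
         which are all known to be zero, so it folds to (const_int 0).  */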
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) to
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  ? byte - shifted_bytes : byte + shifted_bytes));
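
  /* For example, on a 32-bit little-endian target,
     (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) is just the high
     word of X and becomes (subreg:SI (reg:DI x) 4).  */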
/* Make a SUBREG operation or equivalent if it folds.  */

simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
  newx = simplify_subreg (outermode, op, innermode, byte);

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
simplify_rtx (const_rtx x)
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),

    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              ? GET_MODE (XEXP (x, 0))
                                              : GET_MODE (XEXP (x, 1))),

      return simplify_subreg (mode, SUBREG_REG (x),
                              GET_MODE (SUBREG_REG (x)),

      /* Convert (lo_sum (high FOO) FOO) to FOO.  */
      if (GET_CODE (XEXP (x, 0)) == HIGH
          && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))