/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
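
/* For example, on a host with 64-bit HOST_WIDE_INT,
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) yields -1 (all bits set) and
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) 7) yields 0, i.e. the macro computes
   the high half of the sign extension of LOW.  */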

static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
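
/* E.g. in QImode, negating the most negative value -128 overflows to
   +128, which gen_int_mode truncates back to the QImode constant -128.  */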

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
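
/* Assuming 32-bit SImode, the only constant accepted is the one whose
   low 32 bits are 0x80000000: the value is masked down to WIDTH bits
   and compared against 1 << (WIDTH - 1).  */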

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
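
/* E.g. (plus (const_int 4) (reg)) does not fold, but is reordered to
   the canonical (plus (reg) (const_int 4)) before the PLUS is built.  */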

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
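
/* For example, a read of a pool slot in the exact mode it was stored in
   comes back as the pool constant itself; a read in a different mode or
   at a nonzero offset goes through simplify_subreg, so e.g. a DImode
   access to a DFmode pool entry yields the double's bit pattern.  */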

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
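
/* For instance, replacing (reg R) with (const_int 0) inside an
   integer-mode (plus (reg R) (reg S)) rebuilds the PLUS through
   simplify_gen_binary, which folds the x + 0 form to just (reg S).  */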

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }
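
      /* E.g. in SImode, (not (ashift (const_int 1) X)) becomes
         (rotate (const_int -2) X): ~1 is ...11110, and rotating it
         left by X moves the single zero bit to position X, the same
         value as ~(1 << X).  */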

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
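
      /* De Morgan in action: (not (and X Y)) -> (ior (not X) (not Y))
         and (not (ior X Y)) -> (and (not X) (not Y)), forms that
         single and-not/or-not instructions can often match directly.  */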
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
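
      /* E.g. with STORE_FLAG_VALUE == 1 and a 32-bit operand,
         (neg (lt X (const_int 0))) becomes (ashiftrt X (const_int 31)):
         the arithmetic shift replicates the sign bit, giving -1 when
         X < 0 and 0 otherwise.  */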
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                                  0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
      */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
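
      /* E.g. (float_extend:DF (float:SF (reg:HI X))) folds to
         (float:DF (reg:HI X)): every 16-bit integer is exactly
         representable in SFmode's 24-bit significand, so no double
         rounding can occur.  */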
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
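
  /* E.g. folding (not (const_int 15)) in QImode computes ~15 = -16 and
     gen_int_mode returns (const_int -16), already sign-extended to the
     host-wide representation CONST_INTs use.  */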

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
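
/* The clamping above means that e.g. (fix:SI (const_double 1e10))
   folds to (const_int 2147483647): the value exceeds SImode's signed
   upper bound, so the result saturates, matching the middle-end's
   constant-folding semantics rather than any particular hardware.  */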

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
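
/* E.g. "(a op b) op c" with b and c constant folds the inner pair:
   (plus (plus X (const_int 3)) (const_int 4)) becomes
   (plus X (const_int 7)).  */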

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : orig;
            }
        }
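
      /* E.g. (plus (mult X (const_int 4)) X) sets the coefficients to
         4 and 1, so the sum folds to (mult X (const_int 5)), kept only
         if it is no more expensive than the original.  */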

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : orig;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
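
      /* E.g. (minus (symbol_ref "s") (const_int 4)) is rewritten above
         as (plus (symbol_ref "s") (const_int -4)), so a relocatable
         value keeps a positive coefficient in canonical PLUS form.  */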
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
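
      /* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)),
         since exact_log2 (8) == 3.  */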
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (trueop1) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT)
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                    (AND, mode, XEXP (op0, 0),
                                     GEN_INT (INTVAL (XEXP (op0, 1))
                                              & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
2183 if (trueop1
== const0_rtx
)
2185 if (GET_CODE (trueop1
) == CONST_INT
2186 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2187 == GET_MODE_MASK (mode
)))
2188 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2189 if (rtx_equal_p (trueop0
, trueop1
)
2190 && ! side_effects_p (op0
)
2191 && GET_MODE_CLASS (mode
) != MODE_CC
)
2192 return CONST0_RTX (mode
);
2194 /* Canonicalize XOR of the most significant bit to PLUS. */
2195 if ((GET_CODE (op1
) == CONST_INT
2196 || GET_CODE (op1
) == CONST_DOUBLE
)
2197 && mode_signbit_p (mode
, op1
))
2198 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2199 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2200 if ((GET_CODE (op1
) == CONST_INT
2201 || GET_CODE (op1
) == CONST_DOUBLE
)
2202 && GET_CODE (op0
) == PLUS
2203 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
2204 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
2205 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2206 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2207 simplify_gen_binary (XOR
, mode
, op1
,
2210 /* If we are XORing two things that have no bits in common,
2211 convert them into an IOR. This helps to detect rotation encoded
2212 using those methods and possibly other simplifications. */
2214 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2215 && (nonzero_bits (op0
, mode
)
2216 & nonzero_bits (op1
, mode
)) == 0)
2217 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2219 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2220 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2223 int num_negated
= 0;
2225 if (GET_CODE (op0
) == NOT
)
2226 num_negated
++, op0
= XEXP (op0
, 0);
2227 if (GET_CODE (op1
) == NOT
)
2228 num_negated
++, op1
= XEXP (op1
, 0);
2230 if (num_negated
== 2)
2231 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2232 else if (num_negated
== 1)
2233 return simplify_gen_unary (NOT
, mode
,
2234 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2238 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2239 correspond to a machine insn or result in further simplifications
2240 if B is a constant. */
2242 if (GET_CODE (op0
) == AND
2243 && rtx_equal_p (XEXP (op0
, 1), op1
)
2244 && ! side_effects_p (op1
))
2245 return simplify_gen_binary (AND
, mode
,
2246 simplify_gen_unary (NOT
, mode
,
2247 XEXP (op0
, 0), mode
),
2250 else if (GET_CODE (op0
) == AND
2251 && rtx_equal_p (XEXP (op0
, 0), op1
)
2252 && ! side_effects_p (op1
))
2253 return simplify_gen_binary (AND
, mode
,
2254 simplify_gen_unary (NOT
, mode
,
2255 XEXP (op0
, 1), mode
),
2258 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2259 comparison if STORE_FLAG_VALUE is 1. */
2260 if (STORE_FLAG_VALUE
== 1
2261 && trueop1
== const1_rtx
2262 && COMPARISON_P (op0
)
2263 && (reversed
= reversed_comparison (op0
, mode
)))
2266 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2267 is (lt foo (const_int 0)), so we can perform the above
2268 simplification if STORE_FLAG_VALUE is 1. */
2270 if (STORE_FLAG_VALUE
== 1
2271 && trueop1
== const1_rtx
2272 && GET_CODE (op0
) == LSHIFTRT
2273 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2274 && INTVAL (XEXP (op0
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
2275 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2277 /* (xor (comparison foo bar) (const_int sign-bit))
2278 when STORE_FLAG_VALUE is the sign bit. */
2279 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2280 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
2281 == (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1))
2282 && trueop1
== const_true_rtx
2283 && COMPARISON_P (op0
)
2284 && (reversed
= reversed_comparison (op0
, mode
)))
2287 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2293 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2295 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
2297 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2298 HOST_WIDE_INT nzop1
;
2299 if (GET_CODE (trueop1
) == CONST_INT
)
2301 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2302 /* If we are turning off bits already known off in OP0, we need
2304 if ((nzop0
& ~val1
) == 0)
2307 nzop1
= nonzero_bits (trueop1
, mode
);
2308 /* If we are clearing all the nonzero bits, the result is zero. */
2309 if ((nzop1
& nzop0
) == 0
2310 && !side_effects_p (op0
) && !side_effects_p (op1
))
2311 return CONST0_RTX (mode
);
2313 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2314 && GET_MODE_CLASS (mode
) != MODE_CC
)
2317 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2318 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2319 && ! side_effects_p (op0
)
2320 && GET_MODE_CLASS (mode
) != MODE_CC
)
2321 return CONST0_RTX (mode
);
2323 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2324 there are no nonzero bits of C outside of X's mode. */
2325 if ((GET_CODE (op0
) == SIGN_EXTEND
2326 || GET_CODE (op0
) == ZERO_EXTEND
)
2327 && GET_CODE (trueop1
) == CONST_INT
2328 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2329 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2330 & INTVAL (trueop1
)) == 0)
2332 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2333 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2334 gen_int_mode (INTVAL (trueop1
),
2336 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2339 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2340 if (GET_CODE (op0
) == IOR
2341 && GET_CODE (trueop1
) == CONST_INT
2342 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
)
2344 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2345 return simplify_gen_binary (IOR
, mode
,
2346 simplify_gen_binary (AND
, mode
,
2347 XEXP (op0
, 0), op1
),
2348 gen_int_mode (tmp
, mode
));
2351 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2352 insn (and may simplify more). */
2353 if (GET_CODE (op0
) == XOR
2354 && rtx_equal_p (XEXP (op0
, 0), op1
)
2355 && ! side_effects_p (op1
))
2356 return simplify_gen_binary (AND
, mode
,
2357 simplify_gen_unary (NOT
, mode
,
2358 XEXP (op0
, 1), mode
),
2361 if (GET_CODE (op0
) == XOR
2362 && rtx_equal_p (XEXP (op0
, 1), op1
)
2363 && ! side_effects_p (op1
))
2364 return simplify_gen_binary (AND
, mode
,
2365 simplify_gen_unary (NOT
, mode
,
2366 XEXP (op0
, 0), mode
),
2369 /* Similarly for (~(A ^ B)) & A. */
2370 if (GET_CODE (op0
) == NOT
2371 && GET_CODE (XEXP (op0
, 0)) == XOR
2372 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2373 && ! side_effects_p (op1
))
2374 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2376 if (GET_CODE (op0
) == NOT
2377 && GET_CODE (XEXP (op0
, 0)) == XOR
2378 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2379 && ! side_effects_p (op1
))
2380 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2382 /* Convert (A | B) & A to A. */
2383 if (GET_CODE (op0
) == IOR
2384 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2385 || rtx_equal_p (XEXP (op0
, 1), op1
))
2386 && ! side_effects_p (XEXP (op0
, 0))
2387 && ! side_effects_p (XEXP (op0
, 1)))
2390 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2391 ((A & N) + B) & M -> (A + B) & M
2392 Similarly if (N & M) == 0,
2393 ((A | N) + B) & M -> (A + B) & M
2394 and for - instead of + and/or ^ instead of |.
2395 Also, if (N & M) == 0, then
2396 (A +- N) & M -> A & M. */
2397 if (GET_CODE (trueop1
) == CONST_INT
2398 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2399 && ~INTVAL (trueop1
)
2400 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
2401 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2406 pmop
[0] = XEXP (op0
, 0);
2407 pmop
[1] = XEXP (op0
, 1);
2409 if (GET_CODE (pmop
[1]) == CONST_INT
2410 && (INTVAL (pmop
[1]) & INTVAL (trueop1
)) == 0)
2411 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2413 for (which
= 0; which
< 2; which
++)
2416 switch (GET_CODE (tem
))
2419 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2420 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
2421 == INTVAL (trueop1
))
2422 pmop
[which
] = XEXP (tem
, 0);
2426 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2427 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
2428 pmop
[which
] = XEXP (tem
, 0);
2435 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2437 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2439 return simplify_gen_binary (code
, mode
, tem
, op1
);
2443 /* (and X (ior (not X) Y) -> (and X Y) */
2444 if (GET_CODE (op1
) == IOR
2445 && GET_CODE (XEXP (op1
, 0)) == NOT
2446 && op0
== XEXP (XEXP (op1
, 0), 0))
2447 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
2449 /* (and (ior (not X) Y) X) -> (and X Y) */
2450 if (GET_CODE (op0
) == IOR
2451 && GET_CODE (XEXP (op0
, 0)) == NOT
2452 && op1
== XEXP (XEXP (op0
, 0), 0))
2453 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
2455 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2461 /* 0/x is 0 (or x&0 if x has side-effects). */
2462 if (trueop0
== CONST0_RTX (mode
))
2464 if (side_effects_p (op1
))
2465 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2469 if (trueop1
== CONST1_RTX (mode
))
2470 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2471 /* Convert divide by power of two into shift. */
2472 if (GET_CODE (trueop1
) == CONST_INT
2473 && (val
= exact_log2 (INTVAL (trueop1
))) > 0)
2474 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
2478 /* Handle floating point and integers separately. */
2479 if (SCALAR_FLOAT_MODE_P (mode
))
2481 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2482 safe for modes with NaNs, since 0.0 / 0.0 will then be
2483 NaN rather than 0.0. Nor is it safe for modes with signed
2484 zeros, since dividing 0 by a negative number gives -0.0 */
2485 if (trueop0
== CONST0_RTX (mode
)
2486 && !HONOR_NANS (mode
)
2487 && !HONOR_SIGNED_ZEROS (mode
)
2488 && ! side_effects_p (op1
))
2491 if (trueop1
== CONST1_RTX (mode
)
2492 && !HONOR_SNANS (mode
))
2495 if (GET_CODE (trueop1
) == CONST_DOUBLE
2496 && trueop1
!= CONST0_RTX (mode
))
2499 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2502 if (REAL_VALUES_EQUAL (d
, dconstm1
)
2503 && !HONOR_SNANS (mode
))
2504 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2506 /* Change FP division by a constant into multiplication.
2507 Only do this with -freciprocal-math. */
2508 if (flag_reciprocal_math
2509 && !REAL_VALUES_EQUAL (d
, dconst0
))
2511 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
2512 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
2513 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
2519 /* 0/x is 0 (or x&0 if x has side-effects). */
2520 if (trueop0
== CONST0_RTX (mode
))
2522 if (side_effects_p (op1
))
2523 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2527 if (trueop1
== CONST1_RTX (mode
))
2528 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2530 if (trueop1
== constm1_rtx
)
2532 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2533 return simplify_gen_unary (NEG
, mode
, x
, mode
);
2539 /* 0%x is 0 (or x&0 if x has side-effects). */
2540 if (trueop0
== CONST0_RTX (mode
))
2542 if (side_effects_p (op1
))
2543 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2546 /* x%1 is 0 (of x&0 if x has side-effects). */
2547 if (trueop1
== CONST1_RTX (mode
))
2549 if (side_effects_p (op0
))
2550 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
2551 return CONST0_RTX (mode
);
2553 /* Implement modulus by power of two as AND. */
2554 if (GET_CODE (trueop1
) == CONST_INT
2555 && exact_log2 (INTVAL (trueop1
)) > 0)
2556 return simplify_gen_binary (AND
, mode
, op0
,
2557 GEN_INT (INTVAL (op1
) - 1));
2561 /* 0%x is 0 (or x&0 if x has side-effects). */
2562 if (trueop0
== CONST0_RTX (mode
))
2564 if (side_effects_p (op1
))
2565 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2568 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2569 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
2571 if (side_effects_p (op0
))
2572 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
2573 return CONST0_RTX (mode
);
2580 if (trueop1
== CONST0_RTX (mode
))
2582 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2584 /* Rotating ~0 always results in ~0. */
2585 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
2586 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2587 && ! side_effects_p (op1
))
2590 if (SHIFT_COUNT_TRUNCATED
&& GET_CODE (op1
) == CONST_INT
)
2592 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
2593 if (val
!= INTVAL (op1
))
2594 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
2601 if (trueop1
== CONST0_RTX (mode
))
2603 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2605 goto canonicalize_shift
;
2608 if (trueop1
== CONST0_RTX (mode
))
2610 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2612 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2613 if (GET_CODE (op0
) == CLZ
2614 && GET_CODE (trueop1
) == CONST_INT
2615 && STORE_FLAG_VALUE
== 1
2616 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
2618 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2619 unsigned HOST_WIDE_INT zero_val
= 0;
2621 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
2622 && zero_val
== GET_MODE_BITSIZE (imode
)
2623 && INTVAL (trueop1
) == exact_log2 (zero_val
))
2624 return simplify_gen_relational (EQ
, mode
, imode
,
2625 XEXP (op0
, 0), const0_rtx
);
2627 goto canonicalize_shift
;
2630 if (width
<= HOST_BITS_PER_WIDE_INT
2631 && GET_CODE (trueop1
) == CONST_INT
2632 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2633 && ! side_effects_p (op0
))
2635 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2637 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2643 if (width
<= HOST_BITS_PER_WIDE_INT
2644 && GET_CODE (trueop1
) == CONST_INT
2645 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2646 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2647 && ! side_effects_p (op0
))
2649 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2651 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2657 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2659 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2661 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2667 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2669 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2671 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2684 /* ??? There are simplifications that can be done. */
2688 if (!VECTOR_MODE_P (mode
))
2690 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2691 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2692 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2693 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2694 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2696 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2697 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2700 /* Extract a scalar element from a nested VEC_SELECT expression
2701 (with optional nested VEC_CONCAT expression). Some targets
2702 (i386) extract scalar element from a vector using chain of
2703 nested VEC_SELECT expressions. When input operand is a memory
2704 operand, this operation can be simplified to a simple scalar
2705 load from an offseted memory address. */
2706 if (GET_CODE (trueop0
) == VEC_SELECT
)
2708 rtx op0
= XEXP (trueop0
, 0);
2709 rtx op1
= XEXP (trueop0
, 1);
2711 enum machine_mode opmode
= GET_MODE (op0
);
2712 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
2713 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
2715 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
2721 gcc_assert (GET_CODE (op1
) == PARALLEL
);
2722 gcc_assert (i
< n_elts
);
2724 /* Select element, pointed by nested selector. */
2725 elem
= INTVAL (XVECEXP (op1
, 0, i
));
2727 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2728 if (GET_CODE (op0
) == VEC_CONCAT
)
2730 rtx op00
= XEXP (op0
, 0);
2731 rtx op01
= XEXP (op0
, 1);
2733 enum machine_mode mode00
, mode01
;
2734 int n_elts00
, n_elts01
;
2736 mode00
= GET_MODE (op00
);
2737 mode01
= GET_MODE (op01
);
2739 /* Find out number of elements of each operand. */
2740 if (VECTOR_MODE_P (mode00
))
2742 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
2743 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
2748 if (VECTOR_MODE_P (mode01
))
2750 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
2751 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
2756 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
2758 /* Select correct operand of VEC_CONCAT
2759 and adjust selector. */
2760 if (elem
< n_elts01
)
2771 vec
= rtvec_alloc (1);
2772 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
2774 tmp
= gen_rtx_fmt_ee (code
, mode
,
2775 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
2781 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2782 gcc_assert (GET_MODE_INNER (mode
)
2783 == GET_MODE_INNER (GET_MODE (trueop0
)));
2784 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2786 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2788 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2789 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2790 rtvec v
= rtvec_alloc (n_elts
);
2793 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2794 for (i
= 0; i
< n_elts
; i
++)
2796 rtx x
= XVECEXP (trueop1
, 0, i
);
2798 gcc_assert (GET_CODE (x
) == CONST_INT
);
2799 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2803 return gen_rtx_CONST_VECTOR (mode
, v
);
2807 if (XVECLEN (trueop1
, 0) == 1
2808 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2809 && GET_CODE (trueop0
) == VEC_CONCAT
)
2812 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2814 /* Try to find the element in the VEC_CONCAT. */
2815 while (GET_MODE (vec
) != mode
2816 && GET_CODE (vec
) == VEC_CONCAT
)
2818 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2819 if (offset
< vec_size
)
2820 vec
= XEXP (vec
, 0);
2824 vec
= XEXP (vec
, 1);
2826 vec
= avoid_constant_pool_reference (vec
);
2829 if (GET_MODE (vec
) == mode
)
2836 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2837 ? GET_MODE (trueop0
)
2838 : GET_MODE_INNER (mode
));
2839 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2840 ? GET_MODE (trueop1
)
2841 : GET_MODE_INNER (mode
));
2843 gcc_assert (VECTOR_MODE_P (mode
));
2844 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2845 == GET_MODE_SIZE (mode
));
2847 if (VECTOR_MODE_P (op0_mode
))
2848 gcc_assert (GET_MODE_INNER (mode
)
2849 == GET_MODE_INNER (op0_mode
));
2851 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2853 if (VECTOR_MODE_P (op1_mode
))
2854 gcc_assert (GET_MODE_INNER (mode
)
2855 == GET_MODE_INNER (op1_mode
));
2857 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2859 if ((GET_CODE (trueop0
) == CONST_VECTOR
2860 || GET_CODE (trueop0
) == CONST_INT
2861 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2862 && (GET_CODE (trueop1
) == CONST_VECTOR
2863 || GET_CODE (trueop1
) == CONST_INT
2864 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2866 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2867 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2868 rtvec v
= rtvec_alloc (n_elts
);
2870 unsigned in_n_elts
= 1;
2872 if (VECTOR_MODE_P (op0_mode
))
2873 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2874 for (i
= 0; i
< n_elts
; i
++)
2878 if (!VECTOR_MODE_P (op0_mode
))
2879 RTVEC_ELT (v
, i
) = trueop0
;
2881 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2885 if (!VECTOR_MODE_P (op1_mode
))
2886 RTVEC_ELT (v
, i
) = trueop1
;
2888 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2893 return gen_rtx_CONST_VECTOR (mode
, v
);
2906 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2909 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2911 unsigned int width
= GET_MODE_BITSIZE (mode
);
2913 if (VECTOR_MODE_P (mode
)
2914 && code
!= VEC_CONCAT
2915 && GET_CODE (op0
) == CONST_VECTOR
2916 && GET_CODE (op1
) == CONST_VECTOR
)
2918 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2919 enum machine_mode op0mode
= GET_MODE (op0
);
2920 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2921 enum machine_mode op1mode
= GET_MODE (op1
);
2922 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2923 rtvec v
= rtvec_alloc (n_elts
);
2926 gcc_assert (op0_n_elts
== n_elts
);
2927 gcc_assert (op1_n_elts
== n_elts
);
2928 for (i
= 0; i
< n_elts
; i
++)
2930 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2931 CONST_VECTOR_ELT (op0
, i
),
2932 CONST_VECTOR_ELT (op1
, i
));
2935 RTVEC_ELT (v
, i
) = x
;
2938 return gen_rtx_CONST_VECTOR (mode
, v
);
2941 if (VECTOR_MODE_P (mode
)
2942 && code
== VEC_CONCAT
2943 && (CONST_INT_P (op0
)
2944 || GET_CODE (op0
) == CONST_DOUBLE
2945 || GET_CODE (op0
) == CONST_FIXED
)
2946 && (CONST_INT_P (op1
)
2947 || GET_CODE (op1
) == CONST_DOUBLE
2948 || GET_CODE (op1
) == CONST_FIXED
))
2950 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2951 rtvec v
= rtvec_alloc (n_elts
);
2953 gcc_assert (n_elts
>= 2);
2956 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2957 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2959 RTVEC_ELT (v
, 0) = op0
;
2960 RTVEC_ELT (v
, 1) = op1
;
2964 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2965 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2968 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2969 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2970 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2972 for (i
= 0; i
< op0_n_elts
; ++i
)
2973 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2974 for (i
= 0; i
< op1_n_elts
; ++i
)
2975 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2978 return gen_rtx_CONST_VECTOR (mode
, v
);
2981 if (SCALAR_FLOAT_MODE_P (mode
)
2982 && GET_CODE (op0
) == CONST_DOUBLE
2983 && GET_CODE (op1
) == CONST_DOUBLE
2984 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2995 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2997 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2999 for (i
= 0; i
< 4; i
++)
3016 real_from_target (&r
, tmp0
, mode
);
3017 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3021 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3024 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3025 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3026 real_convert (&f0
, mode
, &f0
);
3027 real_convert (&f1
, mode
, &f1
);
3029 if (HONOR_SNANS (mode
)
3030 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3034 && REAL_VALUES_EQUAL (f1
, dconst0
)
3035 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3038 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3039 && flag_trapping_math
3040 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3042 int s0
= REAL_VALUE_NEGATIVE (f0
);
3043 int s1
= REAL_VALUE_NEGATIVE (f1
);
3048 /* Inf + -Inf = NaN plus exception. */
3053 /* Inf - Inf = NaN plus exception. */
3058 /* Inf / Inf = NaN plus exception. */
3065 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3066 && flag_trapping_math
3067 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3068 || (REAL_VALUE_ISINF (f1
)
3069 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3070 /* Inf * 0 = NaN plus exception. */
3073 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3075 real_convert (&result
, mode
, &value
);
3077 /* Don't constant fold this floating point operation if
3078 the result has overflowed and flag_trapping_math. */
3080 if (flag_trapping_math
3081 && MODE_HAS_INFINITIES (mode
)
3082 && REAL_VALUE_ISINF (result
)
3083 && !REAL_VALUE_ISINF (f0
)
3084 && !REAL_VALUE_ISINF (f1
))
3085 /* Overflow plus exception. */
3088 /* Don't constant fold this floating point operation if the
3089 result may dependent upon the run-time rounding mode and
3090 flag_rounding_math is set, or if GCC's software emulation
3091 is unable to accurately represent the result. */
3093 if ((flag_rounding_math
3094 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3095 && (inexact
|| !real_identical (&result
, &value
)))
3098 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3102 /* We can fold some multi-word operations. */
3103 if (GET_MODE_CLASS (mode
) == MODE_INT
3104 && width
== HOST_BITS_PER_WIDE_INT
* 2
3105 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
3106 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
3108 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
3109 HOST_WIDE_INT h1
, h2
, hv
, ht
;
3111 if (GET_CODE (op0
) == CONST_DOUBLE
)
3112 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
3114 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
3116 if (GET_CODE (op1
) == CONST_DOUBLE
)
3117 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
3119 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
3124 /* A - B == A + (-B). */
3125 neg_double (l2
, h2
, &lv
, &hv
);
3128 /* Fall through.... */
3131 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3135 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3139 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3140 &lv
, &hv
, <
, &ht
))
3145 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3146 <
, &ht
, &lv
, &hv
))
3151 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3152 &lv
, &hv
, <
, &ht
))
3157 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3158 <
, &ht
, &lv
, &hv
))
3163 lv
= l1
& l2
, hv
= h1
& h2
;
3167 lv
= l1
| l2
, hv
= h1
| h2
;
3171 lv
= l1
^ l2
, hv
= h1
^ h2
;
3177 && ((unsigned HOST_WIDE_INT
) l1
3178 < (unsigned HOST_WIDE_INT
) l2
)))
3187 && ((unsigned HOST_WIDE_INT
) l1
3188 > (unsigned HOST_WIDE_INT
) l2
)))
3195 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
3197 && ((unsigned HOST_WIDE_INT
) l1
3198 < (unsigned HOST_WIDE_INT
) l2
)))
3205 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
3207 && ((unsigned HOST_WIDE_INT
) l1
3208 > (unsigned HOST_WIDE_INT
) l2
)))
3214 case LSHIFTRT
: case ASHIFTRT
:
3216 case ROTATE
: case ROTATERT
:
3217 if (SHIFT_COUNT_TRUNCATED
)
3218 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
3220 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
3223 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3224 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
3226 else if (code
== ASHIFT
)
3227 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
3228 else if (code
== ROTATE
)
3229 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3230 else /* code == ROTATERT */
3231 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3238 return immed_double_const (lv
, hv
, mode
);
3241 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3242 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3244 /* Get the integer argument values in two forms:
3245 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3247 arg0
= INTVAL (op0
);
3248 arg1
= INTVAL (op1
);
3250 if (width
< HOST_BITS_PER_WIDE_INT
)
3252 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3253 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3256 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3257 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3260 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3261 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3269 /* Compute the value of the arithmetic. */
3274 val
= arg0s
+ arg1s
;
3278 val
= arg0s
- arg1s
;
3282 val
= arg0s
* arg1s
;
3287 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3290 val
= arg0s
/ arg1s
;
3295 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3298 val
= arg0s
% arg1s
;
3303 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3306 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3311 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3314 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3332 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3333 the value is in range. We can't return any old value for
3334 out-of-range arguments because either the middle-end (via
3335 shift_truncation_mask) or the back-end might be relying on
3336 target-specific knowledge. Nor can we rely on
3337 shift_truncation_mask, since the shift might not be part of an
3338 ashlM3, lshrM3 or ashrM3 instruction. */
3339 if (SHIFT_COUNT_TRUNCATED
)
3340 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3341 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3344 val
= (code
== ASHIFT
3345 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3346 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3348 /* Sign-extend the result for arithmetic right shifts. */
3349 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3350 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3358 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3359 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3367 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3368 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3372 /* Do nothing here. */
3376 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3380 val
= ((unsigned HOST_WIDE_INT
) arg0
3381 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3385 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3389 val
= ((unsigned HOST_WIDE_INT
) arg0
3390 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3403 /* ??? There are simplifications that can be done. */
3410 return gen_int_mode (val
, mode
);
3418 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3421 Rather than test for specific case, we do this by a brute-force method
3422 and do all possible simplifications until no more changes occur. Then
3423 we rebuild the operation. */
3425 struct simplify_plus_minus_op_data
3432 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
3436 result
= (commutative_operand_precedence (y
)
3437 - commutative_operand_precedence (x
));
3441 /* Group together equal REGs to do more simplification. */
3442 if (REG_P (x
) && REG_P (y
))
3443 return REGNO (x
) > REGNO (y
);
3449 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3452 struct simplify_plus_minus_op_data ops
[8];
3454 int n_ops
= 2, input_ops
= 2;
3455 int changed
, n_constants
= 0, canonicalized
= 0;
3458 memset (ops
, 0, sizeof ops
);
3460 /* Set up the two operands and then expand them until nothing has been
3461 changed. If we run out of room in our array, give up; this should
3462 almost never happen. */
3467 ops
[1].neg
= (code
== MINUS
);
3473 for (i
= 0; i
< n_ops
; i
++)
3475 rtx this_op
= ops
[i
].op
;
3476 int this_neg
= ops
[i
].neg
;
3477 enum rtx_code this_code
= GET_CODE (this_op
);
3486 ops
[n_ops
].op
= XEXP (this_op
, 1);
3487 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3490 ops
[i
].op
= XEXP (this_op
, 0);
3493 canonicalized
|= this_neg
;
3497 ops
[i
].op
= XEXP (this_op
, 0);
3498 ops
[i
].neg
= ! this_neg
;
3505 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3506 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3507 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3509 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3510 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3511 ops
[n_ops
].neg
= this_neg
;
3519 /* ~a -> (-a - 1) */
3522 ops
[n_ops
].op
= constm1_rtx
;
3523 ops
[n_ops
++].neg
= this_neg
;
3524 ops
[i
].op
= XEXP (this_op
, 0);
3525 ops
[i
].neg
= !this_neg
;
3535 ops
[i
].op
= neg_const_int (mode
, this_op
);
3549 if (n_constants
> 1)
3552 gcc_assert (n_ops
>= 2);
3554 /* If we only have two operands, we can avoid the loops. */
3557 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3560 /* Get the two operands. Be careful with the order, especially for
3561 the cases where code == MINUS. */
3562 if (ops
[0].neg
&& ops
[1].neg
)
3564 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3567 else if (ops
[0].neg
)
3578 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3581 /* Now simplify each pair of operands until nothing changes. */
3584 /* Insertion sort is good enough for an eight-element array. */
3585 for (i
= 1; i
< n_ops
; i
++)
3587 struct simplify_plus_minus_op_data save
;
3589 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
3595 ops
[j
+ 1] = ops
[j
];
3596 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
3601 for (i
= n_ops
- 1; i
> 0; i
--)
3602 for (j
= i
- 1; j
>= 0; j
--)
3604 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3605 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3607 if (lhs
!= 0 && rhs
!= 0)
3609 enum rtx_code ncode
= PLUS
;
3615 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3617 else if (swap_commutative_operands_p (lhs
, rhs
))
3618 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3620 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3621 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3623 rtx tem_lhs
, tem_rhs
;
3625 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3626 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3627 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3629 if (tem
&& !CONSTANT_P (tem
))
3630 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3633 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3635 /* Reject "simplifications" that just wrap the two
3636 arguments in a CONST. Failure to do so can result
3637 in infinite recursion with simplify_binary_operation
3638 when it calls us to simplify CONST operations. */
3640 && ! (GET_CODE (tem
) == CONST
3641 && GET_CODE (XEXP (tem
, 0)) == ncode
3642 && XEXP (XEXP (tem
, 0), 0) == lhs
3643 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3646 if (GET_CODE (tem
) == NEG
)
3647 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3648 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3649 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3653 ops
[j
].op
= NULL_RTX
;
3660 /* If nothing changed, fail. */
3664 /* Pack all the operands to the lower-numbered entries. */
3665 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3675 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3677 && GET_CODE (ops
[1].op
) == CONST_INT
3678 && CONSTANT_P (ops
[0].op
)
3680 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3682 /* We suppressed creation of trivial CONST expressions in the
3683 combination loop to avoid recursion. Create one manually now.
3684 The combination loop should have ensured that there is exactly
3685 one CONST_INT, and the sort will have ensured that it is last
3686 in the array and that any other constant will be next-to-last. */
3689 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3690 && CONSTANT_P (ops
[n_ops
- 2].op
))
3692 rtx value
= ops
[n_ops
- 1].op
;
3693 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3694 value
= neg_const_int (mode
, value
);
3695 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3699 /* Put a non-negated operand first, if possible. */
3701 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3704 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3713 /* Now make the result by performing the requested operations. */
3715 for (i
= 1; i
< n_ops
; i
++)
3716 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3717 mode
, result
, ops
[i
].op
);
3722 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3724 plus_minus_operand_p (const_rtx x
)
3726 return GET_CODE (x
) == PLUS
3727 || GET_CODE (x
) == MINUS
3728 || (GET_CODE (x
) == CONST
3729 && GET_CODE (XEXP (x
, 0)) == PLUS
3730 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
3731 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
3734 /* Like simplify_binary_operation except used for relational operators.
3735 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3736 not also be VOIDmode.
3738 CMP_MODE specifies in which mode the comparison is done in, so it is
3739 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3740 the operands or, if both are VOIDmode, the operands are compared in
3741 "infinite precision". */
3743 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3744 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3746 rtx tem
, trueop0
, trueop1
;
3748 if (cmp_mode
== VOIDmode
)
3749 cmp_mode
= GET_MODE (op0
);
3750 if (cmp_mode
== VOIDmode
)
3751 cmp_mode
= GET_MODE (op1
);
3753 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3756 if (SCALAR_FLOAT_MODE_P (mode
))
3758 if (tem
== const0_rtx
)
3759 return CONST0_RTX (mode
);
3760 #ifdef FLOAT_STORE_FLAG_VALUE
3762 REAL_VALUE_TYPE val
;
3763 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3764 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3770 if (VECTOR_MODE_P (mode
))
3772 if (tem
== const0_rtx
)
3773 return CONST0_RTX (mode
);
3774 #ifdef VECTOR_STORE_FLAG_VALUE
3779 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3780 if (val
== NULL_RTX
)
3782 if (val
== const1_rtx
)
3783 return CONST1_RTX (mode
);
3785 units
= GET_MODE_NUNITS (mode
);
3786 v
= rtvec_alloc (units
);
3787 for (i
= 0; i
< units
; i
++)
3788 RTVEC_ELT (v
, i
) = val
;
3789 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3799 /* For the following tests, ensure const0_rtx is op1. */
3800 if (swap_commutative_operands_p (op0
, op1
)
3801 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3802 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3804 /* If op0 is a compare, extract the comparison arguments from it. */
3805 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3806 return simplify_gen_relational (code
, mode
, VOIDmode
,
3807 XEXP (op0
, 0), XEXP (op0
, 1));
3809 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3813 trueop0
= avoid_constant_pool_reference (op0
);
3814 trueop1
= avoid_constant_pool_reference (op1
);
3815 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3819 /* This part of simplify_relational_operation is only used when CMP_MODE
3820 is not in class MODE_CC (i.e. it is a real comparison).
3822 MODE is the mode of the result, while CMP_MODE specifies in which
3823 mode the comparison is done in, so it is the mode of the operands. */
3826 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3827 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3829 enum rtx_code op0code
= GET_CODE (op0
);
3831 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
3833 /* If op0 is a comparison, extract the comparison arguments
3837 if (GET_MODE (op0
) == mode
)
3838 return simplify_rtx (op0
);
3840 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3841 XEXP (op0
, 0), XEXP (op0
, 1));
3843 else if (code
== EQ
)
3845 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3846 if (new_code
!= UNKNOWN
)
3847 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3848 XEXP (op0
, 0), XEXP (op0
, 1));
3852 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
3853 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
3854 if ((code
== LTU
|| code
== GEU
)
3855 && GET_CODE (op0
) == PLUS
3856 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
3857 && (rtx_equal_p (op1
, XEXP (op0
, 0))
3858 || rtx_equal_p (op1
, XEXP (op0
, 1))))
3861 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
3862 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
3863 cmp_mode
, XEXP (op0
, 0), new_cmp
);
3866 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3867 if ((code
== LTU
|| code
== GEU
)
3868 && GET_CODE (op0
) == PLUS
3869 && rtx_equal_p (op1
, XEXP (op0
, 1))
3870 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3871 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
3872 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
, XEXP (op0
, 0));
3874 if (op1
== const0_rtx
)
3876 /* Canonicalize (GTU x 0) as (NE x 0). */
3878 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
3879 /* Canonicalize (LEU x 0) as (EQ x 0). */
3881 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
3883 else if (op1
== const1_rtx
)
3888 /* Canonicalize (GE x 1) as (GT x 0). */
3889 return simplify_gen_relational (GT
, mode
, cmp_mode
,
3892 /* Canonicalize (GEU x 1) as (NE x 0). */
3893 return simplify_gen_relational (NE
, mode
, cmp_mode
,
3896 /* Canonicalize (LT x 1) as (LE x 0). */
3897 return simplify_gen_relational (LE
, mode
, cmp_mode
,
3900 /* Canonicalize (LTU x 1) as (EQ x 0). */
3901 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
3907 else if (op1
== constm1_rtx
)
3909 /* Canonicalize (LE x -1) as (LT x 0). */
3911 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
3912 /* Canonicalize (GT x -1) as (GE x 0). */
3914 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
3917 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3918 if ((code
== EQ
|| code
== NE
)
3919 && (op0code
== PLUS
|| op0code
== MINUS
)
3921 && CONSTANT_P (XEXP (op0
, 1))
3922 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3924 rtx x
= XEXP (op0
, 0);
3925 rtx c
= XEXP (op0
, 1);
3927 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3929 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3932 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3933 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3935 && op1
== const0_rtx
3936 && GET_MODE_CLASS (mode
) == MODE_INT
3937 && cmp_mode
!= VOIDmode
3938 /* ??? Work-around BImode bugs in the ia64 backend. */
3940 && cmp_mode
!= BImode
3941 && nonzero_bits (op0
, cmp_mode
) == 1
3942 && STORE_FLAG_VALUE
== 1)
3943 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3944 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3945 : lowpart_subreg (mode
, op0
, cmp_mode
);
3947 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3948 if ((code
== EQ
|| code
== NE
)
3949 && op1
== const0_rtx
3951 return simplify_gen_relational (code
, mode
, cmp_mode
,
3952 XEXP (op0
, 0), XEXP (op0
, 1));
3954 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3955 if ((code
== EQ
|| code
== NE
)
3957 && rtx_equal_p (XEXP (op0
, 0), op1
)
3958 && !side_effects_p (XEXP (op0
, 0)))
3959 return simplify_gen_relational (code
, mode
, cmp_mode
,
3960 XEXP (op0
, 1), const0_rtx
);
3962 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3963 if ((code
== EQ
|| code
== NE
)
3965 && rtx_equal_p (XEXP (op0
, 1), op1
)
3966 && !side_effects_p (XEXP (op0
, 1)))
3967 return simplify_gen_relational (code
, mode
, cmp_mode
,
3968 XEXP (op0
, 0), const0_rtx
);
3970 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3971 if ((code
== EQ
|| code
== NE
)
3973 && (GET_CODE (op1
) == CONST_INT
3974 || GET_CODE (op1
) == CONST_DOUBLE
)
3975 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3976 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3977 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3978 simplify_gen_binary (XOR
, cmp_mode
,
3979 XEXP (op0
, 1), op1
));
3981 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
3987 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3988 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
3989 XEXP (op0
, 0), const0_rtx
);
3994 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3995 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
3996 XEXP (op0
, 0), const0_rtx
);
4015 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4016 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4017 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4018 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4019 For floating-point comparisons, assume that the operands were ordered. */
4022 comparison_result (enum rtx_code code
, int known_results
)
4028 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4031 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4035 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4038 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4042 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4045 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4048 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4050 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4053 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4055 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4058 return const_true_rtx
;
4066 /* Check if the given comparison (done in the given MODE) is actually a
4067 tautology or a contradiction.
4068 If no simplification is possible, this function returns zero.
4069 Otherwise, it returns either const_true_rtx or const0_rtx. */
4072 simplify_const_relational_operation (enum rtx_code code
,
4073 enum machine_mode mode
,
4080 gcc_assert (mode
!= VOIDmode
4081 || (GET_MODE (op0
) == VOIDmode
4082 && GET_MODE (op1
) == VOIDmode
));
4084 /* If op0 is a compare, extract the comparison arguments from it. */
4085 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4087 op1
= XEXP (op0
, 1);
4088 op0
= XEXP (op0
, 0);
4090 if (GET_MODE (op0
) != VOIDmode
)
4091 mode
= GET_MODE (op0
);
4092 else if (GET_MODE (op1
) != VOIDmode
)
4093 mode
= GET_MODE (op1
);
4098 /* We can't simplify MODE_CC values since we don't know what the
4099 actual comparison is. */
4100 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4103 /* Make sure the constant is second. */
4104 if (swap_commutative_operands_p (op0
, op1
))
4106 tem
= op0
, op0
= op1
, op1
= tem
;
4107 code
= swap_condition (code
);
4110 trueop0
= avoid_constant_pool_reference (op0
);
4111 trueop1
= avoid_constant_pool_reference (op1
);
4113 /* For integer comparisons of A and B maybe we can simplify A - B and can
4114 then simplify a comparison of that with zero. If A and B are both either
4115 a register or a CONST_INT, this can't help; testing for these cases will
4116 prevent infinite recursion here and speed things up.
4118 We can only do this for EQ and NE comparisons as otherwise we may
4119 lose or introduce overflow which we cannot disregard as undefined as
4120 we do not know the signedness of the operation on either the left or
4121 the right hand side of the comparison. */
4123 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4124 && (code
== EQ
|| code
== NE
)
4125 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
4126 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
4127 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4128 /* We cannot do this if tem is a nonzero address. */
4129 && ! nonzero_address_p (tem
))
4130 return simplify_const_relational_operation (signed_condition (code
),
4131 mode
, tem
, const0_rtx
);
4133 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4134 return const_true_rtx
;
4136 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4139 /* For modes without NaNs, if the two operands are equal, we know the
4140 result except if they have side-effects. Even with NaNs we know
4141 the result of unordered comparisons and, if signaling NaNs are
4142 irrelevant, also the result of LT/GT/LTGT. */
4143 if ((! HONOR_NANS (GET_MODE (trueop0
))
4144 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4145 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4146 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4147 && rtx_equal_p (trueop0
, trueop1
)
4148 && ! side_effects_p (trueop0
))
4149 return comparison_result (code
, CMP_EQ
);
4151 /* If the operands are floating-point constants, see if we can fold
4153 if (GET_CODE (trueop0
) == CONST_DOUBLE
4154 && GET_CODE (trueop1
) == CONST_DOUBLE
4155 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4157 REAL_VALUE_TYPE d0
, d1
;
4159 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4160 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4162 /* Comparisons are unordered iff at least one of the values is NaN. */
4163 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4173 return const_true_rtx
;
4186 return comparison_result (code
,
4187 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4188 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4191 /* Otherwise, see if the operands are both integers. */
4192 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4193 && (GET_CODE (trueop0
) == CONST_DOUBLE
4194 || GET_CODE (trueop0
) == CONST_INT
)
4195 && (GET_CODE (trueop1
) == CONST_DOUBLE
4196 || GET_CODE (trueop1
) == CONST_INT
))
4198 int width
= GET_MODE_BITSIZE (mode
);
4199 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4200 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4202 /* Get the two words comprising each integer constant. */
4203 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
4205 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4206 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4210 l0u
= l0s
= INTVAL (trueop0
);
4211 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4214 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4216 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4217 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4221 l1u
= l1s
= INTVAL (trueop1
);
4222 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4225 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4226 we have to sign or zero-extend the values. */
4227 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4229 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4230 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4232 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4233 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4235 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4236 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4238 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4239 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4241 if (h0u
== h1u
&& l0u
== l1u
)
4242 return comparison_result (code
, CMP_EQ
);
4246 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4247 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4248 return comparison_result (code
, cr
);
4252 /* Optimize comparisons with upper and lower bounds. */
4253 if (SCALAR_INT_MODE_P (mode
)
4254 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
4255 && GET_CODE (trueop1
) == CONST_INT
)
4258 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4259 HOST_WIDE_INT val
= INTVAL (trueop1
);
4260 HOST_WIDE_INT mmin
, mmax
;
4270 /* Get a reduced range if the sign bit is zero. */
4271 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4278 rtx mmin_rtx
, mmax_rtx
;
4279 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4281 mmin
= INTVAL (mmin_rtx
);
4282 mmax
= INTVAL (mmax_rtx
);
4285 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4287 mmin
>>= (sign_copies
- 1);
4288 mmax
>>= (sign_copies
- 1);
4294 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4296 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4297 return const_true_rtx
;
4298 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4303 return const_true_rtx
;
4308 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4310 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4311 return const_true_rtx
;
4312 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4317 return const_true_rtx
;
4323 /* x == y is always false for y out of range. */
4324 if (val
< mmin
|| val
> mmax
)
4328 /* x > y is always false for y >= mmax, always true for y < mmin. */
4330 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4332 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4333 return const_true_rtx
;
4339 return const_true_rtx
;
4342 /* x < y is always false for y <= mmin, always true for y > mmax. */
4344 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4346 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4347 return const_true_rtx
;
4353 return const_true_rtx
;
4357 /* x != y is always true for y out of range. */
4358 if (val
< mmin
|| val
> mmax
)
4359 return const_true_rtx
;
4367 /* Optimize integer comparisons with zero. */
4368 if (trueop1
== const0_rtx
)
4370 /* Some addresses are known to be nonzero. We don't know
4371 their sign, but equality comparisons are known. */
4372 if (nonzero_address_p (trueop0
))
4374 if (code
== EQ
|| code
== LEU
)
4376 if (code
== NE
|| code
== GTU
)
4377 return const_true_rtx
;
4380 /* See if the first operand is an IOR with a constant. If so, we
4381 may be able to determine the result of this comparison. */
4382 if (GET_CODE (op0
) == IOR
)
4384 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4385 if (GET_CODE (inner_const
) == CONST_INT
&& inner_const
!= const0_rtx
)
4387 int sign_bitnum
= GET_MODE_BITSIZE (mode
) - 1;
4388 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4389 && (INTVAL (inner_const
)
4390 & ((HOST_WIDE_INT
) 1 << sign_bitnum
)));
4399 return const_true_rtx
;
4403 return const_true_rtx
;
4417 /* Optimize comparison of ABS with zero. */
4418 if (trueop1
== CONST0_RTX (mode
)
4419 && (GET_CODE (trueop0
) == ABS
4420 || (GET_CODE (trueop0
) == FLOAT_EXTEND
4421 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
4426 /* Optimize abs(x) < 0.0. */
4427 if (!HONOR_SNANS (mode
)
4428 && (!INTEGRAL_MODE_P (mode
)
4429 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4431 if (INTEGRAL_MODE_P (mode
)
4432 && (issue_strict_overflow_warning
4433 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4434 warning (OPT_Wstrict_overflow
,
4435 ("assuming signed overflow does not occur when "
4436 "assuming abs (x) < 0 is false"));
4442 /* Optimize abs(x) >= 0.0. */
4443 if (!HONOR_NANS (mode
)
4444 && (!INTEGRAL_MODE_P (mode
)
4445 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4447 if (INTEGRAL_MODE_P (mode
)
4448 && (issue_strict_overflow_warning
4449 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4450 warning (OPT_Wstrict_overflow
,
4451 ("assuming signed overflow does not occur when "
4452 "assuming abs (x) >= 0 is true"));
4453 return const_true_rtx
;
4458 /* Optimize ! (abs(x) < 0.0). */
4459 return const_true_rtx
;
4469 /* Simplify CODE, an operation with result mode MODE and three operands,
4470 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4471 a constant. Return 0 if no simplifications is possible. */
4474 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
4475 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
4478 unsigned int width
= GET_MODE_BITSIZE (mode
);
4480 /* VOIDmode means "infinite" precision. */
4482 width
= HOST_BITS_PER_WIDE_INT
;
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
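
      /* Worked example (illustrative only): a SIGN_EXTRACT of width
	 op1 == 4 at position op2 == 4 from op0 == (const_int 0xab),
	 with !BITS_BIG_ENDIAN:

	    val = 0xab;
	    val >>= 4;               val is now 0xa
	    val &= (1 << 4) - 1;     field zero-extended, still 0xa
	    bit (1 << 3) is set, so  val |= ~0xf, giving -6

	 i.e. the 4-bit field 1010 read as a signed value.  */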
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;
      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;
      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;
      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
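
      /* E.g. (if_then_else (ne x y) x y) reduces to x, and
	 (if_then_else (eq x y) x y) reduces to y; the HONOR_NANS and
	 HONOR_SIGNED_ZEROS guards keep these from firing where x == y
	 does not imply that x and y are interchangeable.  */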
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
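
/* A concrete VEC_MERGE case for the code above (illustrative only):
   merging two V4SI vectors with op2 == (const_int 5) takes elements 0
   and 2 from op0 and elements 1 and 3 from op1, since mask bits 0 and 2
   are set; op2 == 0 yields op1 outright and op2 == 15 yields op0.  */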
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
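
  /* The loop below stores each element least-significant byte first
     (illustrative only): with BITS_PER_UNIT == 8, unpacking
     (const_int 0x12345678) for a 32-bit element fills value[0..3]
     with 0x78, 0x56, 0x34, 0x12.  */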
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
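
      /* A worked instance of the swizzle above (illustrative only):
	 with 4-byte words, WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN, and a
	 V2SI vector (num_elem == 2, elem_bitsize == 32), elem == 0
	 gives ibyte == 4 and hence bytele == 4: element 0 lands in the
	 higher-addressed word, matching target memory order.  */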
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;
	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
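
  /* Example of the renumbering above (illustrative only): for
     (subreg:HI (x:SI) 2) on a big-endian 32-bit target,
     ibyte == 4 - 2 - 2 == 0; memory offset 2 names the low half of X,
     which is byte 0 once bytes are numbered least-significant first.  */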
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
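
  /* E.g. (subreg:QI (subreg:HI (x:SI) 0) 0) is simplified here to
     (subreg:QI (x:SI) 0): the two offsets are combined and a single
     SUBREG of the innermost register is produced instead.  */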
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok a partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
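
  /* E.g. (subreg:QI (mem:SI A) 0) becomes (mem:QI A) with the address
     adjusted as needed for BYTE (illustrative only), provided A is not
     mode-dependent and the access is not widened.  */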
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
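
  /* For a complex value (concat:SC (reg:SF r) (reg:SF i)) with 4-byte
     SFmode, (subreg:SF ... 0) selects the real part R and
     (subreg:SF ... 4) the imaginary part I (illustrative only).  */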
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
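
  /* The three possibilities above, concretely (illustrative only,
     lowpart offsets shown as 0):
	(subreg:QI (zero_extend:SI (x:QI)) 0) -> x
	(subreg:QI (zero_extend:SI (x:HI)) 0) -> (subreg:QI (x:HI) 0)
	(subreg:HI (zero_extend:SI (x:QI)) 0) -> (zero_extend:HI (x:QI))  */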
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
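
/* Example of the word-extraction case above (illustrative only): on a
   32-bit little-endian target,
   (subreg:SI (lshiftrt:DI (x:DI) (const_int 32)) 0) names the high word
   of X and is rewritten as (subreg:SI (x:DI) 4).  */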
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}