/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
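
/* For example, if LOW has its most significant bit set (so it reads as
   negative when interpreted as a signed HOST_WIDE_INT), the high word
   becomes -1; otherwise it becomes 0.  This mirrors two's-complement
   sign extension of the low word into the high word.  */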
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
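
/* For instance, in SImode this accepts the CONST_INT 0x80000000; for
   modes wider than HOST_BITS_PER_WIDE_INT the sign bit lives in
   CONST_DOUBLE_HIGH, which is why that branch requires the low half
   of the constant to be zero.  */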
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);
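
      /* The two preceding rules are the two's-complement identity
         -X == ~X + 1 rearranged: ~(X + -1) == -X and ~(-X) == X - 1.  */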

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);
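
      /* Again the identity -X == ~X + 1, read in both directions:
         -(X + 1) == ~X and -(~X) == X + 1.  */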

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);
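
      /* When A is known to be 0 or 1, A ^ 1 == 1 - A, so -(A ^ 1) == A - 1,
         i.e. the (plus A -1) form generated above.  */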

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
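      /* Shifting by isize - 1 smears the sign bit of X across the word:
         the arithmetic shift yields -1 when X is negative and 0 otherwise,
         which is -(lt X 0) when STORE_FLAG_VALUE is 1; the logical shift
         yields 1 or 0, matching STORE_FLAG_VALUE of -1 after negation.  */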
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode, XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
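          /* arg0 & -arg0 isolates the lowest set bit; exact_log2 gives its
             position, and the + 1 produces the FFS convention where bit 0
             yields 1 and a zero input yields 0.  */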
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
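          /* Each "arg0 &= arg0 - 1" clears the lowest set bit, so the loop
             runs once per set bit; PARITY then keeps only the low bit of
             that count.  */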
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : orig;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : orig;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (trueop1) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT)
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
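
      /* (A << CX) | (A >> CY) with CX + CY equal to the mode width moves
         every bit of A by CX positions with wraparound, which is exactly a
         rotate by CX.  */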
      /* Same, but for ashift that has been "simplified" to a wider mode
        by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
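      /* Illustrative sketch (not part of the original transformations,
         assuming QImode, whose sign bit is 0x80): (xor X (const_int -128))
         is canonicalized to (plus X (const_int -128)), since adding the
         sign bit and XORing it agree modulo 2**8.  */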
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
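      /* Illustrative sketch (not part of the original transformations):
         (xor (and X (const_int 0xff00)) (and Y (const_int 0x00ff))) has
         no nonzero bits in common between its operands, so it is
         rewritten as the corresponding IOR.  */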
2224 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2225 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2228 int num_negated
= 0;
2230 if (GET_CODE (op0
) == NOT
)
2231 num_negated
++, op0
= XEXP (op0
, 0);
2232 if (GET_CODE (op1
) == NOT
)
2233 num_negated
++, op1
= XEXP (op1
, 0);
2235 if (num_negated
== 2)
2236 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2237 else if (num_negated
== 1)
2238 return simplify_gen_unary (NOT
, mode
,
2239 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2243 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2244 correspond to a machine insn or result in further simplifications
2245 if B is a constant. */
2247 if (GET_CODE (op0
) == AND
2248 && rtx_equal_p (XEXP (op0
, 1), op1
)
2249 && ! side_effects_p (op1
))
2250 return simplify_gen_binary (AND
, mode
,
2251 simplify_gen_unary (NOT
, mode
,
2252 XEXP (op0
, 0), mode
),
2255 else if (GET_CODE (op0
) == AND
2256 && rtx_equal_p (XEXP (op0
, 0), op1
)
2257 && ! side_effects_p (op1
))
2258 return simplify_gen_binary (AND
, mode
,
2259 simplify_gen_unary (NOT
, mode
,
2260 XEXP (op0
, 1), mode
),
2263 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2264 comparison if STORE_FLAG_VALUE is 1. */
2265 if (STORE_FLAG_VALUE
== 1
2266 && trueop1
== const1_rtx
2267 && COMPARISON_P (op0
)
2268 && (reversed
= reversed_comparison (op0
, mode
)))
2271 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2272 is (lt foo (const_int 0)), so we can perform the above
2273 simplification if STORE_FLAG_VALUE is 1. */
2275 if (STORE_FLAG_VALUE
== 1
2276 && trueop1
== const1_rtx
2277 && GET_CODE (op0
) == LSHIFTRT
2278 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2279 && INTVAL (XEXP (op0
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
2280 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2282 /* (xor (comparison foo bar) (const_int sign-bit))
2283 when STORE_FLAG_VALUE is the sign bit. */
2284 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2285 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
2286 == (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1))
2287 && trueop1
== const_true_rtx
2288 && COMPARISON_P (op0
)
2289 && (reversed
= reversed_comparison (op0
, mode
)))
2294 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
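      /* Illustrative sketch (not part of the original transformations):
         (and:SI (sign_extend:SI X:QI) (const_int 0x7f)) becomes
         (zero_extend:SI (and:QI X (const_int 0x7f))), since 0x7f has no
         bits outside QImode's mask.  */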
2334 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2335 if (GET_CODE (op0
) == IOR
2336 && GET_CODE (trueop1
) == CONST_INT
2337 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
)
2339 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2340 return simplify_gen_binary (IOR
, mode
,
2341 simplify_gen_binary (AND
, mode
,
2342 XEXP (op0
, 0), op1
),
2343 gen_int_mode (tmp
, mode
));
2346 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2347 insn (and may simplify more). */
2348 if (GET_CODE (op0
) == XOR
2349 && rtx_equal_p (XEXP (op0
, 0), op1
)
2350 && ! side_effects_p (op1
))
2351 return simplify_gen_binary (AND
, mode
,
2352 simplify_gen_unary (NOT
, mode
,
2353 XEXP (op0
, 1), mode
),
2356 if (GET_CODE (op0
) == XOR
2357 && rtx_equal_p (XEXP (op0
, 1), op1
)
2358 && ! side_effects_p (op1
))
2359 return simplify_gen_binary (AND
, mode
,
2360 simplify_gen_unary (NOT
, mode
,
2361 XEXP (op0
, 0), mode
),
2364 /* Similarly for (~(A ^ B)) & A. */
2365 if (GET_CODE (op0
) == NOT
2366 && GET_CODE (XEXP (op0
, 0)) == XOR
2367 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2368 && ! side_effects_p (op1
))
2369 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2371 if (GET_CODE (op0
) == NOT
2372 && GET_CODE (XEXP (op0
, 0)) == XOR
2373 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2374 && ! side_effects_p (op1
))
2375 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2377 /* Convert (A | B) & A to A. */
2378 if (GET_CODE (op0
) == IOR
2379 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2380 || rtx_equal_p (XEXP (op0
, 1), op1
))
2381 && ! side_effects_p (XEXP (op0
, 0))
2382 && ! side_effects_p (XEXP (op0
, 1)))
2385 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2386 ((A & N) + B) & M -> (A + B) & M
2387 Similarly if (N & M) == 0,
2388 ((A | N) + B) & M -> (A + B) & M
2389 and for - instead of + and/or ^ instead of |. */
2390 if (GET_CODE (trueop1
) == CONST_INT
2391 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2392 && ~INTVAL (trueop1
)
2393 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
2394 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2399 pmop
[0] = XEXP (op0
, 0);
2400 pmop
[1] = XEXP (op0
, 1);
2402 for (which
= 0; which
< 2; which
++)
2405 switch (GET_CODE (tem
))
2408 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2409 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
2410 == INTVAL (trueop1
))
2411 pmop
[which
] = XEXP (tem
, 0);
2415 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2416 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
2417 pmop
[which
] = XEXP (tem
, 0);
2424 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2426 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2428 return simplify_gen_binary (code
, mode
, tem
, op1
);
2431 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
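      /* Illustrative sketch (not part of the original transformations):
         (udiv X (const_int 16)) becomes (lshiftrt X (const_int 4)),
         since exact_log2 (16) == 4.  */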
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
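      /* Illustrative sketch (not part of the original transformations,
         and only valid under -funsafe-math-optimizations): X / 4.0 is
         rewritten as X * 0.25, multiplying by the reciprocal computed
         above.  */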
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
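      /* Illustrative sketch (not part of the original transformations):
         (umod X (const_int 8)) becomes (and X (const_int 7)).  */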
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
      break;
    case ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      break;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && GET_CODE (trueop1) == CONST_INT
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_BITSIZE (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      break;
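      /* Illustrative sketch (not part of the original transformations,
         assuming a 32-bit inner mode whose CLZ_DEFINED_VALUE_AT_ZERO is
         32): (clz X) is 32 only when X is zero, and shifting it right by
         exact_log2 (32) == 5 yields 1 exactly in that case, so
         (lshiftrt (clz X) (const_int 5)) becomes (eq X (const_int 0)).  */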
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
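      /* Illustrative sketch (not part of the original transformations,
         assuming a 16-bit mode): (smin X (const_int -32768)) and
         (umax X (const_int -1)) both fold to their constant operand,
         since -32768 is the smallest signed and 0xffff the largest
         unsigned value in that mode.  */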
2648 /* ??? There are simplifications that can be done. */
2652 if (!VECTOR_MODE_P (mode
))
2654 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2655 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2656 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2657 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2658 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2660 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2661 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2666 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2667 gcc_assert (GET_MODE_INNER (mode
)
2668 == GET_MODE_INNER (GET_MODE (trueop0
)));
2669 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2671 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2673 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2674 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2675 rtvec v
= rtvec_alloc (n_elts
);
2678 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2679 for (i
= 0; i
< n_elts
; i
++)
2681 rtx x
= XVECEXP (trueop1
, 0, i
);
2683 gcc_assert (GET_CODE (x
) == CONST_INT
);
2684 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2688 return gen_rtx_CONST_VECTOR (mode
, v
);
2692 if (XVECLEN (trueop1
, 0) == 1
2693 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2694 && GET_CODE (trueop0
) == VEC_CONCAT
)
2697 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2699 /* Try to find the element in the VEC_CONCAT. */
2700 while (GET_MODE (vec
) != mode
2701 && GET_CODE (vec
) == VEC_CONCAT
)
2703 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2704 if (offset
< vec_size
)
2705 vec
= XEXP (vec
, 0);
2709 vec
= XEXP (vec
, 1);
2711 vec
= avoid_constant_pool_reference (vec
);
2714 if (GET_MODE (vec
) == mode
)
2721 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2722 ? GET_MODE (trueop0
)
2723 : GET_MODE_INNER (mode
));
2724 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2725 ? GET_MODE (trueop1
)
2726 : GET_MODE_INNER (mode
));
2728 gcc_assert (VECTOR_MODE_P (mode
));
2729 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2730 == GET_MODE_SIZE (mode
));
2732 if (VECTOR_MODE_P (op0_mode
))
2733 gcc_assert (GET_MODE_INNER (mode
)
2734 == GET_MODE_INNER (op0_mode
));
2736 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2738 if (VECTOR_MODE_P (op1_mode
))
2739 gcc_assert (GET_MODE_INNER (mode
)
2740 == GET_MODE_INNER (op1_mode
));
2742 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2744 if ((GET_CODE (trueop0
) == CONST_VECTOR
2745 || GET_CODE (trueop0
) == CONST_INT
2746 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2747 && (GET_CODE (trueop1
) == CONST_VECTOR
2748 || GET_CODE (trueop1
) == CONST_INT
2749 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2751 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2752 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2753 rtvec v
= rtvec_alloc (n_elts
);
2755 unsigned in_n_elts
= 1;
2757 if (VECTOR_MODE_P (op0_mode
))
2758 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2759 for (i
= 0; i
< n_elts
; i
++)
2763 if (!VECTOR_MODE_P (op0_mode
))
2764 RTVEC_ELT (v
, i
) = trueop0
;
2766 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2770 if (!VECTOR_MODE_P (op1_mode
))
2771 RTVEC_ELT (v
, i
) = trueop1
;
2773 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2778 return gen_rtx_CONST_VECTOR (mode
, v
);
2791 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2794 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2796 unsigned int width
= GET_MODE_BITSIZE (mode
);
2798 if (VECTOR_MODE_P (mode
)
2799 && code
!= VEC_CONCAT
2800 && GET_CODE (op0
) == CONST_VECTOR
2801 && GET_CODE (op1
) == CONST_VECTOR
)
2803 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2804 enum machine_mode op0mode
= GET_MODE (op0
);
2805 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2806 enum machine_mode op1mode
= GET_MODE (op1
);
2807 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2808 rtvec v
= rtvec_alloc (n_elts
);
2811 gcc_assert (op0_n_elts
== n_elts
);
2812 gcc_assert (op1_n_elts
== n_elts
);
2813 for (i
= 0; i
< n_elts
; i
++)
2815 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2816 CONST_VECTOR_ELT (op0
, i
),
2817 CONST_VECTOR_ELT (op1
, i
));
2820 RTVEC_ELT (v
, i
) = x
;
2823 return gen_rtx_CONST_VECTOR (mode
, v
);
2826 if (VECTOR_MODE_P (mode
)
2827 && code
== VEC_CONCAT
2828 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2830 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2831 rtvec v
= rtvec_alloc (n_elts
);
2833 gcc_assert (n_elts
>= 2);
2836 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2837 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2839 RTVEC_ELT (v
, 0) = op0
;
2840 RTVEC_ELT (v
, 1) = op1
;
2844 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2845 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2848 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2849 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2850 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2852 for (i
= 0; i
< op0_n_elts
; ++i
)
2853 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2854 for (i
= 0; i
< op1_n_elts
; ++i
)
2855 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2858 return gen_rtx_CONST_VECTOR (mode
, v
);
2861 if (SCALAR_FLOAT_MODE_P (mode
)
2862 && GET_CODE (op0
) == CONST_DOUBLE
2863 && GET_CODE (op1
) == CONST_DOUBLE
2864 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2875 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2877 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2879 for (i
= 0; i
< 4; i
++)
2896 real_from_target (&r
, tmp0
, mode
);
2897 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2901 REAL_VALUE_TYPE f0
, f1
, value
, result
;
2904 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
2905 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
2906 real_convert (&f0
, mode
, &f0
);
2907 real_convert (&f1
, mode
, &f1
);
2909 if (HONOR_SNANS (mode
)
2910 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
2914 && REAL_VALUES_EQUAL (f1
, dconst0
)
2915 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
2918 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2919 && flag_trapping_math
2920 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
2922 int s0
= REAL_VALUE_NEGATIVE (f0
);
2923 int s1
= REAL_VALUE_NEGATIVE (f1
);
2928 /* Inf + -Inf = NaN plus exception. */
2933 /* Inf - Inf = NaN plus exception. */
2938 /* Inf / Inf = NaN plus exception. */
2945 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2946 && flag_trapping_math
2947 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
2948 || (REAL_VALUE_ISINF (f1
)
2949 && REAL_VALUES_EQUAL (f0
, dconst0
))))
2950 /* Inf * 0 = NaN plus exception. */
2953 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
2955 real_convert (&result
, mode
, &value
);
2957 /* Don't constant fold this floating point operation if
2958 the result has overflowed and flag_trapping_math. */
2960 if (flag_trapping_math
2961 && MODE_HAS_INFINITIES (mode
)
2962 && REAL_VALUE_ISINF (result
)
2963 && !REAL_VALUE_ISINF (f0
)
2964 && !REAL_VALUE_ISINF (f1
))
2965 /* Overflow plus exception. */
2968 /* Don't constant fold this floating point operation if the
2969 result may dependent upon the run-time rounding mode and
2970 flag_rounding_math is set, or if GCC's software emulation
2971 is unable to accurately represent the result. */
2973 if ((flag_rounding_math
2974 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
2975 && !flag_unsafe_math_optimizations
))
2976 && (inexact
|| !real_identical (&result
, &value
)))
2979 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
2983 /* We can fold some multi-word operations. */
2984 if (GET_MODE_CLASS (mode
) == MODE_INT
2985 && width
== HOST_BITS_PER_WIDE_INT
* 2
2986 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2987 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2989 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2990 HOST_WIDE_INT h1
, h2
, hv
, ht
;
2992 if (GET_CODE (op0
) == CONST_DOUBLE
)
2993 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
2995 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
2997 if (GET_CODE (op1
) == CONST_DOUBLE
)
2998 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
3000 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
3005 /* A - B == A + (-B). */
3006 neg_double (l2
, h2
, &lv
, &hv
);
3009 /* Fall through.... */
3012 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3016 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3020 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3021 &lv
, &hv
, <
, &ht
))
3026 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3027 <
, &ht
, &lv
, &hv
))
3032 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3033 &lv
, &hv
, <
, &ht
))
3038 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3039 <
, &ht
, &lv
, &hv
))
3044 lv
= l1
& l2
, hv
= h1
& h2
;
3048 lv
= l1
| l2
, hv
= h1
| h2
;
3052 lv
= l1
^ l2
, hv
= h1
^ h2
;
3058 && ((unsigned HOST_WIDE_INT
) l1
3059 < (unsigned HOST_WIDE_INT
) l2
)))
3068 && ((unsigned HOST_WIDE_INT
) l1
3069 > (unsigned HOST_WIDE_INT
) l2
)))
3076 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
3078 && ((unsigned HOST_WIDE_INT
) l1
3079 < (unsigned HOST_WIDE_INT
) l2
)))
3086 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
3088 && ((unsigned HOST_WIDE_INT
) l1
3089 > (unsigned HOST_WIDE_INT
) l2
)))
3095 case LSHIFTRT
: case ASHIFTRT
:
3097 case ROTATE
: case ROTATERT
:
3098 if (SHIFT_COUNT_TRUNCATED
)
3099 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
3101 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
3104 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3105 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
3107 else if (code
== ASHIFT
)
3108 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
3109 else if (code
== ROTATE
)
3110 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3111 else /* code == ROTATERT */
3112 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3119 return immed_double_const (lv
, hv
, mode
);
3122 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3123 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3125 /* Get the integer argument values in two forms:
3126 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3128 arg0
= INTVAL (op0
);
3129 arg1
= INTVAL (op1
);
3131 if (width
< HOST_BITS_PER_WIDE_INT
)
3133 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3134 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3137 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3138 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3141 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3142 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3150 /* Compute the value of the arithmetic. */
3155 val
= arg0s
+ arg1s
;
3159 val
= arg0s
- arg1s
;
3163 val
= arg0s
* arg1s
;
3168 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3171 val
= arg0s
/ arg1s
;
3176 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3179 val
= arg0s
% arg1s
;
3184 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3187 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3192 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3195 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3213 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3214 the value is in range. We can't return any old value for
3215 out-of-range arguments because either the middle-end (via
3216 shift_truncation_mask) or the back-end might be relying on
3217 target-specific knowledge. Nor can we rely on
3218 shift_truncation_mask, since the shift might not be part of an
3219 ashlM3, lshrM3 or ashrM3 instruction. */
3220 if (SHIFT_COUNT_TRUNCATED
)
3221 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3222 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3225 val
= (code
== ASHIFT
3226 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3227 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3229 /* Sign-extend the result for arithmetic right shifts. */
3230 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3231 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3239 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3240 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3248 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3249 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3253 /* Do nothing here. */
3257 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3261 val
= ((unsigned HOST_WIDE_INT
) arg0
3262 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3266 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3270 val
= ((unsigned HOST_WIDE_INT
) arg0
3271 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3279 /* ??? There are simplifications that can be done. */
3286 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
3301 struct simplify_plus_minus_op_data
3308 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
3310 const struct simplify_plus_minus_op_data
*d1
= p1
;
3311 const struct simplify_plus_minus_op_data
*d2
= p2
;
3314 result
= (commutative_operand_precedence (d2
->op
)
3315 - commutative_operand_precedence (d1
->op
));
3319 /* Group together equal REGs to do more simplification. */
3320 if (REG_P (d1
->op
) && REG_P (d2
->op
))
3321 return REGNO (d1
->op
) - REGNO (d2
->op
);
3327 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3330 struct simplify_plus_minus_op_data ops
[8];
3332 int n_ops
= 2, input_ops
= 2;
3333 int changed
, n_constants
= 0, canonicalized
= 0;
3336 memset (ops
, 0, sizeof ops
);
3338 /* Set up the two operands and then expand them until nothing has been
3339 changed. If we run out of room in our array, give up; this should
3340 almost never happen. */
3345 ops
[1].neg
= (code
== MINUS
);
3351 for (i
= 0; i
< n_ops
; i
++)
3353 rtx this_op
= ops
[i
].op
;
3354 int this_neg
= ops
[i
].neg
;
3355 enum rtx_code this_code
= GET_CODE (this_op
);
3364 ops
[n_ops
].op
= XEXP (this_op
, 1);
3365 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3368 ops
[i
].op
= XEXP (this_op
, 0);
3371 canonicalized
|= this_neg
;
3375 ops
[i
].op
= XEXP (this_op
, 0);
3376 ops
[i
].neg
= ! this_neg
;
3383 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3384 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3385 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3387 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3388 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3389 ops
[n_ops
].neg
= this_neg
;
3397 /* ~a -> (-a - 1) */
3400 ops
[n_ops
].op
= constm1_rtx
;
3401 ops
[n_ops
++].neg
= this_neg
;
3402 ops
[i
].op
= XEXP (this_op
, 0);
3403 ops
[i
].neg
= !this_neg
;
3413 ops
[i
].op
= neg_const_int (mode
, this_op
);
3427 if (n_constants
> 1)
3430 gcc_assert (n_ops
>= 2);
3432 /* If we only have two operands, we can avoid the loops. */
3435 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3438 /* Get the two operands. Be careful with the order, especially for
3439 the cases where code == MINUS. */
3440 if (ops
[0].neg
&& ops
[1].neg
)
3442 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3445 else if (ops
[0].neg
)
3456 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3459 /* Now simplify each pair of operands until nothing changes. */
3462 /* Insertion sort is good enough for an eight-element array. */
3463 for (i
= 1; i
< n_ops
; i
++)
3465 struct simplify_plus_minus_op_data save
;
3467 if (simplify_plus_minus_op_data_cmp (&ops
[j
], &ops
[i
]) < 0)
3473 ops
[j
+ 1] = ops
[j
];
3474 while (j
-- && simplify_plus_minus_op_data_cmp (&ops
[j
], &save
) > 0);
3478 /* This is only useful the first time through. */
3483 for (i
= n_ops
- 1; i
> 0; i
--)
3484 for (j
= i
- 1; j
>= 0; j
--)
3486 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3487 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3489 if (lhs
!= 0 && rhs
!= 0)
3491 enum rtx_code ncode
= PLUS
;
3497 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3499 else if (swap_commutative_operands_p (lhs
, rhs
))
3500 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3502 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3503 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3505 rtx tem_lhs
, tem_rhs
;
3507 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3508 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3509 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3511 if (tem
&& !CONSTANT_P (tem
))
3512 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3515 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3517 /* Reject "simplifications" that just wrap the two
3518 arguments in a CONST. Failure to do so can result
3519 in infinite recursion with simplify_binary_operation
3520 when it calls us to simplify CONST operations. */
3522 && ! (GET_CODE (tem
) == CONST
3523 && GET_CODE (XEXP (tem
, 0)) == ncode
3524 && XEXP (XEXP (tem
, 0), 0) == lhs
3525 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3528 if (GET_CODE (tem
) == NEG
)
3529 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3530 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3531 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3535 ops
[j
].op
= NULL_RTX
;
3541 /* Pack all the operands to the lower-numbered entries. */
3542 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3552 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3554 && GET_CODE (ops
[1].op
) == CONST_INT
3555 && CONSTANT_P (ops
[0].op
)
3557 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3559 /* We suppressed creation of trivial CONST expressions in the
3560 combination loop to avoid recursion. Create one manually now.
3561 The combination loop should have ensured that there is exactly
3562 one CONST_INT, and the sort will have ensured that it is last
3563 in the array and that any other constant will be next-to-last. */
3566 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3567 && CONSTANT_P (ops
[n_ops
- 2].op
))
3569 rtx value
= ops
[n_ops
- 1].op
;
3570 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3571 value
= neg_const_int (mode
, value
);
3572 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3576 /* Put a non-negated operand first, if possible. */
3578 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3581 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3590 /* Now make the result by performing the requested operations. */
3592 for (i
= 1; i
< n_ops
; i
++)
3593 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3594 mode
, result
, ops
[i
].op
);
3599 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3601 plus_minus_operand_p (rtx x
)
3603 return GET_CODE (x
) == PLUS
3604 || GET_CODE (x
) == MINUS
3605 || (GET_CODE (x
) == CONST
3606 && GET_CODE (XEXP (x
, 0)) == PLUS
3607 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
3608 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
3620 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3621 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3623 rtx tem
, trueop0
, trueop1
;
3625 if (cmp_mode
== VOIDmode
)
3626 cmp_mode
= GET_MODE (op0
);
3627 if (cmp_mode
== VOIDmode
)
3628 cmp_mode
= GET_MODE (op1
);
3630 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3633 if (SCALAR_FLOAT_MODE_P (mode
))
3635 if (tem
== const0_rtx
)
3636 return CONST0_RTX (mode
);
3637 #ifdef FLOAT_STORE_FLAG_VALUE
3639 REAL_VALUE_TYPE val
;
3640 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3641 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3647 if (VECTOR_MODE_P (mode
))
3649 if (tem
== const0_rtx
)
3650 return CONST0_RTX (mode
);
3651 #ifdef VECTOR_STORE_FLAG_VALUE
3656 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3657 if (val
== NULL_RTX
)
3659 if (val
== const1_rtx
)
3660 return CONST1_RTX (mode
);
3662 units
= GET_MODE_NUNITS (mode
);
3663 v
= rtvec_alloc (units
);
3664 for (i
= 0; i
< units
; i
++)
3665 RTVEC_ELT (v
, i
) = val
;
3666 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3676 /* For the following tests, ensure const0_rtx is op1. */
3677 if (swap_commutative_operands_p (op0
, op1
)
3678 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3679 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3681 /* If op0 is a compare, extract the comparison arguments from it. */
3682 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3683 return simplify_relational_operation (code
, mode
, VOIDmode
,
3684 XEXP (op0
, 0), XEXP (op0
, 1));
3686 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3690 trueop0
= avoid_constant_pool_reference (op0
);
3691 trueop1
= avoid_constant_pool_reference (op1
);
3692 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3696 /* This part of simplify_relational_operation is only used when CMP_MODE
3697 is not in class MODE_CC (i.e. it is a real comparison).
3699 MODE is the mode of the result, while CMP_MODE specifies in which
3700 mode the comparison is done in, so it is the mode of the operands. */
3703 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3704 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3706 enum rtx_code op0code
= GET_CODE (op0
);
3708 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
3710 /* If op0 is a comparison, extract the comparison arguments
3714 if (GET_MODE (op0
) == mode
)
3715 return simplify_rtx (op0
);
3717 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3718 XEXP (op0
, 0), XEXP (op0
, 1));
3720 else if (code
== EQ
)
3722 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3723 if (new_code
!= UNKNOWN
)
3724 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3725 XEXP (op0
, 0), XEXP (op0
, 1));
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
                               cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }
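  /* Illustrative sketch (not part of the original transformations):
     (eq (plus X (const_int 4)) (const_int 10)) becomes
     (eq X (const_int 6)).  */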
3787 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3788 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3790 && op1
== const0_rtx
3791 && GET_MODE_CLASS (mode
) == MODE_INT
3792 && cmp_mode
!= VOIDmode
3793 /* ??? Work-around BImode bugs in the ia64 backend. */
3795 && cmp_mode
!= BImode
3796 && nonzero_bits (op0
, cmp_mode
) == 1
3797 && STORE_FLAG_VALUE
== 1)
3798 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3799 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3800 : lowpart_subreg (mode
, op0
, cmp_mode
);
3802 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3803 if ((code
== EQ
|| code
== NE
)
3804 && op1
== const0_rtx
3806 return simplify_gen_relational (code
, mode
, cmp_mode
,
3807 XEXP (op0
, 0), XEXP (op0
, 1));
3809 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3810 if ((code
== EQ
|| code
== NE
)
3812 && rtx_equal_p (XEXP (op0
, 0), op1
)
3813 && !side_effects_p (XEXP (op0
, 0)))
3814 return simplify_gen_relational (code
, mode
, cmp_mode
,
3815 XEXP (op0
, 1), const0_rtx
);
3817 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3818 if ((code
== EQ
|| code
== NE
)
3820 && rtx_equal_p (XEXP (op0
, 1), op1
)
3821 && !side_effects_p (XEXP (op0
, 1)))
3822 return simplify_gen_relational (code
, mode
, cmp_mode
,
3823 XEXP (op0
, 0), const0_rtx
);
3825 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3826 if ((code
== EQ
|| code
== NE
)
3828 && (GET_CODE (op1
) == CONST_INT
3829 || GET_CODE (op1
) == CONST_DOUBLE
)
3830 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3831 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3832 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3833 simplify_gen_binary (XOR
, cmp_mode
,
3834 XEXP (op0
, 1), op1
));
3836 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
3842 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3843 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
3844 XEXP (op0
, 0), const0_rtx
);
3849 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3850 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
3851 XEXP (op0
, 0), const0_rtx
);
3860 /* Check if the given comparison (done in the given MODE) is actually a
3861 tautology or a contradiction.
3862 If no simplification is possible, this function returns zero.
3863 Otherwise, it returns either const_true_rtx or const0_rtx. */
3866 simplify_const_relational_operation (enum rtx_code code
,
3867 enum machine_mode mode
,
3870 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3875 gcc_assert (mode
!= VOIDmode
3876 || (GET_MODE (op0
) == VOIDmode
3877 && GET_MODE (op1
) == VOIDmode
));
3879 /* If op0 is a compare, extract the comparison arguments from it. */
3880 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3882 op1
= XEXP (op0
, 1);
3883 op0
= XEXP (op0
, 0);
3885 if (GET_MODE (op0
) != VOIDmode
)
3886 mode
= GET_MODE (op0
);
3887 else if (GET_MODE (op1
) != VOIDmode
)
3888 mode
= GET_MODE (op1
);
3893 /* We can't simplify MODE_CC values since we don't know what the
3894 actual comparison is. */
3895 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3898 /* Make sure the constant is second. */
3899 if (swap_commutative_operands_p (op0
, op1
))
3901 tem
= op0
, op0
= op1
, op1
= tem
;
3902 code
= swap_condition (code
);
3905 trueop0
= avoid_constant_pool_reference (op0
);
3906 trueop1
= avoid_constant_pool_reference (op1
);
3908 /* For integer comparisons of A and B maybe we can simplify A - B and can
3909 then simplify a comparison of that with zero. If A and B are both either
3910 a register or a CONST_INT, this can't help; testing for these cases will
3911 prevent infinite recursion here and speed things up.
3913 We can only do this for EQ and NE comparisons as otherwise we may
3914 lose or introduce overflow which we cannot disregard as undefined as
3915 we do not know the signedness of the operation on either the left or
3916 the right hand side of the comparison. */
3918 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3919 && (code
== EQ
|| code
== NE
)
3920 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3921 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3922 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3923 /* We cannot do this if tem is a nonzero address. */
3924 && ! nonzero_address_p (tem
))
3925 return simplify_const_relational_operation (signed_condition (code
),
3926 mode
, tem
, const0_rtx
);
3928 if (! HONOR_NANS (mode
) && code
== ORDERED
)
3929 return const_true_rtx
;
3931 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
3934 /* For modes without NaNs, if the two operands are equal, we know the
3935 result except if they have side-effects. */
3936 if (! HONOR_NANS (GET_MODE (trueop0
))
3937 && rtx_equal_p (trueop0
, trueop1
)
3938 && ! side_effects_p (trueop0
))
3939 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3941 /* If the operands are floating-point constants, see if we can fold
3943 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3944 && GET_CODE (trueop1
) == CONST_DOUBLE
3945 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3947 REAL_VALUE_TYPE d0
, d1
;
3949 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3950 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3952 /* Comparisons are unordered iff at least one of the values is NaN. */
3953 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3963 return const_true_rtx
;
3976 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3977 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3978 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3981 /* Otherwise, see if the operands are both integers. */
3982 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3983 && (GET_CODE (trueop0
) == CONST_DOUBLE
3984 || GET_CODE (trueop0
) == CONST_INT
)
3985 && (GET_CODE (trueop1
) == CONST_DOUBLE
3986 || GET_CODE (trueop1
) == CONST_INT
))
3988 int width
= GET_MODE_BITSIZE (mode
);
3989 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3990 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3992 /* Get the two words comprising each integer constant. */
3993 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3995 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3996 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4000 l0u
= l0s
= INTVAL (trueop0
);
4001 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4004 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4006 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4007 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4011 l1u
= l1s
= INTVAL (trueop1
);
4012 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4015 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4016 we have to sign or zero-extend the values. */
4017 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4019 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4020 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4022 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4023 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4025 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4026 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4028 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4029 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4031 equal
= (h0u
== h1u
&& l0u
== l1u
);
4032 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
4033 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
4034 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
4035 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
4038 /* Otherwise, there are some code-specific tests we can make. */
4041 /* Optimize comparisons with upper and lower bounds. */
4042 if (SCALAR_INT_MODE_P (mode
)
4043 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
4056 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
4063 /* x >= min is always true. */
4064 if (rtx_equal_p (trueop1
, mmin
))
4065 tem
= const_true_rtx
;
4071 /* x <= max is always true. */
4072 if (rtx_equal_p (trueop1
, mmax
))
4073 tem
= const_true_rtx
;
4078 /* x > max is always false. */
4079 if (rtx_equal_p (trueop1
, mmax
))
4085 /* x < min is always false. */
4086 if (rtx_equal_p (trueop1
, mmin
))
4093 if (tem
== const0_rtx
4094 || tem
== const_true_rtx
)
4101 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
4106 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
4107 return const_true_rtx
;
4111 /* Optimize abs(x) < 0.0. */
4112 if (trueop1
== CONST0_RTX (mode
)
4113 && !HONOR_SNANS (mode
)
4114 && (!INTEGRAL_MODE_P (mode
)
4115 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4117 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
4119 if (GET_CODE (tem
) == ABS
)
4121 if (INTEGRAL_MODE_P (mode
)
4122 && (issue_strict_overflow_warning
4123 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4124 warning (OPT_Wstrict_overflow
,
4125 ("assuming signed overflow does not occur when "
4126 "assuming abs (x) < 0 is false"));
4131 /* Optimize popcount (x) < 0. */
4132 if (GET_CODE (trueop0
) == POPCOUNT
&& trueop1
== const0_rtx
)
4133 return const_true_rtx
;
4137 /* Optimize abs(x) >= 0.0. */
4138 if (trueop1
== CONST0_RTX (mode
)
4139 && !HONOR_NANS (mode
)
4140 && (!INTEGRAL_MODE_P (mode
)
4141 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4143 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
4145 if (GET_CODE (tem
) == ABS
)
4147 if (INTEGRAL_MODE_P (mode
)
4148 && (issue_strict_overflow_warning
4149 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4150 warning (OPT_Wstrict_overflow
,
4151 ("assuming signed overflow does not occur when "
4152 "assuming abs (x) >= 0 is true"));
4153 return const_true_rtx
;
4157 /* Optimize popcount (x) >= 0. */
4158 if (GET_CODE (trueop0
) == POPCOUNT
&& trueop1
== const0_rtx
)
4159 return const_true_rtx
;
4163 /* Optimize ! (abs(x) < 0.0). */
4164 if (trueop1
== CONST0_RTX (mode
))
4166 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
4168 if (GET_CODE (tem
) == ABS
)
4169 return const_true_rtx
;
4180 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4186 return equal
? const_true_rtx
: const0_rtx
;
4189 return ! equal
? const_true_rtx
: const0_rtx
;
4192 return op0lt
? const_true_rtx
: const0_rtx
;
4195 return op1lt
? const_true_rtx
: const0_rtx
;
4197 return op0ltu
? const_true_rtx
: const0_rtx
;
4199 return op1ltu
? const_true_rtx
: const0_rtx
;
4202 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
4205 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
4207 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
4209 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
4211 return const_true_rtx
;
4219 /* Simplify CODE, an operation with result mode MODE and three operands,
4220 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4221 a constant. Return 0 if no simplifications is possible. */
4224 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
4225 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
4228 unsigned int width
= GET_MODE_BITSIZE (mode
);
4230 /* VOIDmode means "infinite" precision. */
4232 width
= HOST_BITS_PER_WIDE_INT
;
4238 if (GET_CODE (op0
) == CONST_INT
4239 && GET_CODE (op1
) == CONST_INT
4240 && GET_CODE (op2
) == CONST_INT
4241 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
4242 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
4244 /* Extracting a bit-field from a constant */
4245 HOST_WIDE_INT val
= INTVAL (op0
);
4247 if (BITS_BIG_ENDIAN
)
4248 val
>>= (GET_MODE_BITSIZE (op0_mode
)
4249 - INTVAL (op2
) - INTVAL (op1
));
4251 val
>>= INTVAL (op2
);
4253 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4255 /* First zero-extend. */
4256 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4257 /* If desired, propagate sign bit. */
4258 if (code
== SIGN_EXTRACT
4259 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4260 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4263 /* Clear the bits that don't belong in our mode,
4264 unless they and our sign bit are all one.
4265 So we get either a reasonable negative value or a reasonable
4266 unsigned value for this mode. */
4267 if (width
< HOST_BITS_PER_WIDE_INT
4268 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4269 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4270 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4272 return gen_int_mode (val
, mode
);
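        /* Illustrative sketch (not part of the original code, assuming
           !BITS_BIG_ENDIAN): (zero_extract (const_int 0xa5) (const_int 4)
           (const_int 0)) extracts the low four bits and folds to
           (const_int 5).  */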
4277 if (GET_CODE (op0
) == CONST_INT
)
4278 return op0
!= const0_rtx
? op1
: op2
;
4280 /* Convert c ? a : a into "a". */
4281 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
4284 /* Convert a != b ? a : b into "a". */
4285 if (GET_CODE (op0
) == NE
4286 && ! side_effects_p (op0
)
4287 && ! HONOR_NANS (mode
)
4288 && ! HONOR_SIGNED_ZEROS (mode
)
4289 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4290 && rtx_equal_p (XEXP (op0
, 1), op2
))
4291 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4292 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4295 /* Convert a == b ? a : b into "b". */
4296 if (GET_CODE (op0
) == EQ
4297 && ! side_effects_p (op0
)
4298 && ! HONOR_NANS (mode
)
4299 && ! HONOR_SIGNED_ZEROS (mode
)
4300 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4301 && rtx_equal_p (XEXP (op0
, 1), op2
))
4302 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4303 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4306 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
4308 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
4309 ? GET_MODE (XEXP (op0
, 1))
4310 : GET_MODE (XEXP (op0
, 0)));
4313 /* Look for happy constants in op1 and op2. */
4314 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
4316 HOST_WIDE_INT t
= INTVAL (op1
);
4317 HOST_WIDE_INT f
= INTVAL (op2
);
4319 if (t
== STORE_FLAG_VALUE
&& f
== 0)
4320 code
= GET_CODE (op0
);
4321 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
4324 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4332 return simplify_gen_relational (code
, mode
, cmp_mode
,
4333 XEXP (op0
, 0), XEXP (op0
, 1));
4336 if (cmp_mode
== VOIDmode
)
4337 cmp_mode
= op0_mode
;
4338 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4339 cmp_mode
, XEXP (op0
, 0),
4342 /* See if any simplifications were possible. */
4345 if (GET_CODE (temp
) == CONST_INT
)
4346 return temp
== const0_rtx
? op2
: op1
;
4348 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4354 gcc_assert (GET_MODE (op0
) == mode
);
4355 gcc_assert (GET_MODE (op1
) == mode
);
4356 gcc_assert (VECTOR_MODE_P (mode
));
4357 op2
= avoid_constant_pool_reference (op2
);
4358 if (GET_CODE (op2
) == CONST_INT
)
4360 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4361 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4362 int mask
= (1 << n_elts
) - 1;
4364 if (!(INTVAL (op2
) & mask
))
4366 if ((INTVAL (op2
) & mask
) == mask
)
4369 op0
= avoid_constant_pool_reference (op0
);
4370 op1
= avoid_constant_pool_reference (op1
);
4371 if (GET_CODE (op0
) == CONST_VECTOR
4372 && GET_CODE (op1
) == CONST_VECTOR
)
4374 rtvec v
= rtvec_alloc (n_elts
);
4377 for (i
= 0; i
< n_elts
; i
++)
4378 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4379 ? CONST_VECTOR_ELT (op0
, i
)
4380 : CONST_VECTOR_ELT (op1
, i
));
4381 return gen_rtx_CONST_VECTOR (mode
, v
);
4393 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4394 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4396 Works by unpacking OP into a collection of 8-bit values
4397 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4398 and then repacking them again for OUTERMODE. */
4401 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
4402 enum machine_mode innermode
, unsigned int byte
)
4404 /* We support up to 512-bit values (for V8DFmode). */
4408 value_mask
= (1 << value_bit
) - 1
4410 unsigned char value
[max_bitsize
/ value_bit
];
4419 rtvec result_v
= NULL
;
4420 enum mode_class outer_class
;
4421 enum machine_mode outer_submode
;
4423 /* Some ports misuse CCmode. */
4424 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
4427 /* We have no way to represent a complex constant at the rtl level. */
4428 if (COMPLEX_MODE_P (outermode
))
4431 /* Unpack the value. */
4433 if (GET_CODE (op
) == CONST_VECTOR
)
4435 num_elem
= CONST_VECTOR_NUNITS (op
);
4436 elems
= &CONST_VECTOR_ELT (op
, 0);
4437 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4443 elem_bitsize
= max_bitsize
;
4445 /* If this asserts, it is too complicated; reducing value_bit may help. */
4446 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4447 /* I don't know how to handle endianness of sub-units. */
4448 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
4450 for (elem
= 0; elem
< num_elem
; elem
++)
4453 rtx el
= elems
[elem
];
4455 /* Vectors are kept in target memory order. (This is probably
4458 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4459 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4461 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4462 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4463 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4464 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4465 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
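
  /* Illustrative note (not from the original sources): unpacking a plain
     (const_int -2) uses ELEM_BITSIZE == MAX_BITSIZE, so on a host with a
     64-bit HOST_WIDE_INT the first loop stores 0xfe, 0xff, ..., 0xff for
     the low eight bytes and the sign-extension loop then stores -1
     (i.e. 0xff) for the remaining bytes of VALUE.  */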
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
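
  /* Illustrative note (not from the original sources): on a fully
     big-endian 32-bit target, BYTE 3 of a 4-byte SImode constant is the
     byte at memory offset 3, i.e. the least-significant byte of the
     value; the renumbering above maps BYTE from 3 to 0, so VALUE_START
     ends up pointing at the low-order chunk of VALUE.  */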
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
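
  /* Illustrative example (not from the original sources, and assuming a
     little-endian target): simplify_subreg (QImode, GEN_INT (0x1234),
     HImode, 0) dispatches to simplify_immed_subreg and folds to
     (const_int 0x34), the low byte of the HImode constant.  */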
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  An irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
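
  /* Illustrative example (not from the original sources): on a
     little-endian target, (subreg:QI (subreg:HI (reg:SI x) 2) 1) has the
     two byte offsets added, so the nested subreg collapses to
     (subreg:QI (reg:SI x) 3) when that offset is representable.  */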
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust the offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
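
  /* Illustrative example (not from the original sources): for a complex
     value (concat:SC re im) with 4-byte SFmode parts,
     (subreg:SF (concat:SC re im) 4) falls past the first part, so PART is
     the imaginary half and FINAL_OFFSET is 0; the recursive call then
     returns IM directly.  */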
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source provides.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
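
  /* Illustrative examples (not from the original sources): the lowpart
     case turns (subreg:QI (zero_extend:SI (reg:QI x)) 0) into (reg:QI x)
     on a little-endian target, while a subreg whose bits lie entirely in
     the zero-filled upper part, e.g.
     (subreg:HI (zero_extend:SI (reg:QI x)) 2), folds to (const_int 0).  */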
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
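
/* Illustrative example (not from the original sources): with QImode as the
   outer mode and SImode as the inner mode on a little-endian target,
   (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
   satisfies the first shift pattern above and becomes
   (ashiftrt:QI (reg:QI x) (const_int 2)).  */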
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
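
/* Illustrative usage sketch (not part of this file): a caller that wants
   the low SImode word of a DImode value VAL, folding when possible and
   otherwise generating a (subreg:SI ...), might write

     rtx lo = simplify_gen_subreg (SImode, val, DImode,
                                   subreg_lowpart_offset (SImode, DImode));

   If VAL is a constant this folds immediately through
   simplify_immed_subreg; note that the result can still be NULL when no
   valid subreg can be formed.  */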
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass-dependent state to be provided to these
	   routines and add simplifications based on the pass-dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
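
/* Illustrative usage (not from the original sources): a pass holding an
   rtx EXPR can attempt a simplification and keep the original form when
   nothing simpler is found:

     rtx tem = simplify_rtx (expr);
     if (tem)
       expr = tem;  */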
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))