/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
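
/* For example (illustrative, assuming a 64-bit HOST_WIDE_INT): the low
   word of the pair representing -2 is 0xfffffffffffffffe, and
   HWI_SIGN_EXTEND of that word is -1, the correct high word; for a
   non-negative low word such as 5 it yields 0.  */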
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
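
/* For example (illustrative): in SImode the sign bit is bit 31, so a
   CONST_INT whose low 32 bits are 0x80000000 satisfies mode_signbit_p,
   while (const_int 1) and (const_int -1) do not.  */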
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
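
/* Usage sketch (illustrative): simplify_gen_binary (PLUS, SImode, x,
   const0_rtx) hands back X itself because the fold succeeds, whereas
   simplify_gen_binary (PLUS, SImode, const1_rtx, x) swaps the operands
   and builds (plus:SI x (const_int 1)).  */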
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
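
/* For instance (illustrative): given (mem/u:SF (symbol_ref ".LC0"))
   where .LC0 is a constant pool entry holding 1.0, this returns the
   (const_double:SF 1.0) itself so that later folding can see the value;
   anything that is not such a reference is returned unchanged.  */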
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
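
/* Usage sketch (illustrative): replacing (reg:SI 60) with (const_int 4)
   in (plus:SI (reg:SI 60) (const_int 1)) rebuilds and folds the PLUS,
   so the result is simply (const_int 5).  */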
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
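
      /* For example (illustrative): (not:SI (and:SI (reg:SI 60) (reg:SI 61)))
         becomes (ior:SI (not:SI (reg:SI 60)) (not:SI (reg:SI 61))), which
         matches the and-not/or-not patterns some machines provide.  */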
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
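
      /* For instance (illustrative, assuming STORE_FLAG_VALUE == 1):
         (neg:SI (lt:SI (reg:SI 60) (const_int 0))) becomes
         (ashiftrt:SI (reg:SI 60) (const_int 31)), an arithmetic shift
         that replicates the sign bit across the word.  */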
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
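
/* Example (illustrative, little-endian lowpart): if (reg:SI 60) holds a
   variable promoted from HImode with signed promotion, then
   (sign_extend:SI (subreg:HI (reg:SI 60) 0)) already equals (reg:SI 60),
   which is what the SUBREG_PROMOTED_VAR_P tests above recover.  */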
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
921 if (code
== VEC_DUPLICATE
)
923 gcc_assert (VECTOR_MODE_P (mode
));
924 if (GET_MODE (op
) != VOIDmode
)
926 if (!VECTOR_MODE_P (GET_MODE (op
)))
927 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
929 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
932 if (GET_CODE (op
) == CONST_INT
|| GET_CODE (op
) == CONST_DOUBLE
933 || GET_CODE (op
) == CONST_VECTOR
)
935 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
936 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
937 rtvec v
= rtvec_alloc (n_elts
);
940 if (GET_CODE (op
) != CONST_VECTOR
)
941 for (i
= 0; i
< n_elts
; i
++)
942 RTVEC_ELT (v
, i
) = op
;
945 enum machine_mode inmode
= GET_MODE (op
);
946 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
947 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
949 gcc_assert (in_n_elts
< n_elts
);
950 gcc_assert ((n_elts
% in_n_elts
) == 0);
951 for (i
= 0; i
< n_elts
; i
++)
952 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
954 return gen_rtx_CONST_VECTOR (mode
, v
);
958 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
960 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
961 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
962 enum machine_mode opmode
= GET_MODE (op
);
963 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
964 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
965 rtvec v
= rtvec_alloc (n_elts
);
968 gcc_assert (op_n_elts
== n_elts
);
969 for (i
= 0; i
< n_elts
; i
++)
971 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
972 CONST_VECTOR_ELT (op
, i
),
973 GET_MODE_INNER (opmode
));
976 RTVEC_ELT (v
, i
) = x
;
978 return gen_rtx_CONST_VECTOR (mode
, v
);
981 /* The order of these tests is critical so that, for example, we don't
982 check the wrong mode (input vs. output) for a conversion operation,
983 such as FIX. At some point, this should be simplified. */
985 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
986 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
988 HOST_WIDE_INT hv
, lv
;
991 if (GET_CODE (op
) == CONST_INT
)
992 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
994 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
996 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
997 d
= real_value_truncate (mode
, d
);
998 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1000 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
1001 && (GET_CODE (op
) == CONST_DOUBLE
1002 || GET_CODE (op
) == CONST_INT
))
1004 HOST_WIDE_INT hv
, lv
;
1007 if (GET_CODE (op
) == CONST_INT
)
1008 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1010 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1012 if (op_mode
== VOIDmode
)
1014 /* We don't know how to interpret negative-looking numbers in
1015 this case, so don't try to fold those. */
1019 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
1022 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1024 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1025 d
= real_value_truncate (mode
, d
);
1026 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1029 if (GET_CODE (op
) == CONST_INT
1030 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1032 HOST_WIDE_INT arg0
= INTVAL (op
);
1046 val
= (arg0
>= 0 ? arg0
: - arg0
);
1050 /* Don't use ffs here. Instead, get low order bit and then its
1051 number. If arg0 is zero, this will return 0, as desired. */
1052 arg0
&= GET_MODE_MASK (mode
);
1053 val
= exact_log2 (arg0
& (- arg0
)) + 1;
1057 arg0
&= GET_MODE_MASK (mode
);
1058 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1061 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
1065 arg0
&= GET_MODE_MASK (mode
);
1068 /* Even if the value at zero is undefined, we have to come
1069 up with some replacement. Seems good enough. */
1070 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1071 val
= GET_MODE_BITSIZE (mode
);
1074 val
= exact_log2 (arg0
& -arg0
);
1078 arg0
&= GET_MODE_MASK (mode
);
1081 val
++, arg0
&= arg0
- 1;
1085 arg0
&= GET_MODE_MASK (mode
);
1088 val
++, arg0
&= arg0
- 1;
1097 for (s
= 0; s
< width
; s
+= 8)
1099 unsigned int d
= width
- s
- 8;
1100 unsigned HOST_WIDE_INT byte
;
1101 byte
= (arg0
>> s
) & 0xff;
1112 /* When zero-extending a CONST_INT, we need to know its
1114 gcc_assert (op_mode
!= VOIDmode
);
1115 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1117 /* If we were really extending the mode,
1118 we would have to distinguish between zero-extension
1119 and sign-extension. */
1120 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1123 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1124 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1130 if (op_mode
== VOIDmode
)
1132 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1134 /* If we were really extending the mode,
1135 we would have to distinguish between zero-extension
1136 and sign-extension. */
1137 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1140 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1143 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1145 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
1146 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1154 case FLOAT_TRUNCATE
:
1164 return gen_int_mode (val
, mode
);
1167 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1168 for a DImode operation on a CONST_INT. */
1169 else if (GET_MODE (op
) == VOIDmode
1170 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1171 && (GET_CODE (op
) == CONST_DOUBLE
1172 || GET_CODE (op
) == CONST_INT
))
1174 unsigned HOST_WIDE_INT l1
, lv
;
1175 HOST_WIDE_INT h1
, hv
;
1177 if (GET_CODE (op
) == CONST_DOUBLE
)
1178 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1180 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1190 neg_double (l1
, h1
, &lv
, &hv
);
1195 neg_double (l1
, h1
, &lv
, &hv
);
1207 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
1210 lv
= exact_log2 (l1
& -l1
) + 1;
1216 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
1217 - HOST_BITS_PER_WIDE_INT
;
1219 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
1220 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1221 lv
= GET_MODE_BITSIZE (mode
);
1227 lv
= exact_log2 (l1
& -l1
);
1229 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
1230 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1231 lv
= GET_MODE_BITSIZE (mode
);
1259 for (s
= 0; s
< width
; s
+= 8)
1261 unsigned int d
= width
- s
- 8;
1262 unsigned HOST_WIDE_INT byte
;
1264 if (s
< HOST_BITS_PER_WIDE_INT
)
1265 byte
= (l1
>> s
) & 0xff;
1267 byte
= (h1
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1269 if (d
< HOST_BITS_PER_WIDE_INT
)
1272 hv
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1278 /* This is just a change-of-mode, so do nothing. */
1283 gcc_assert (op_mode
!= VOIDmode
);
1285 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1289 lv
= l1
& GET_MODE_MASK (op_mode
);
1293 if (op_mode
== VOIDmode
1294 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1298 lv
= l1
& GET_MODE_MASK (op_mode
);
1299 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
1300 && (lv
& ((HOST_WIDE_INT
) 1
1301 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
1302 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1304 hv
= HWI_SIGN_EXTEND (lv
);
1315 return immed_double_const (lv
, hv
, mode
);
1318 else if (GET_CODE (op
) == CONST_DOUBLE
1319 && SCALAR_FLOAT_MODE_P (mode
))
1321 REAL_VALUE_TYPE d
, t
;
1322 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1327 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1329 real_sqrt (&t
, mode
, &d
);
1333 d
= REAL_VALUE_ABS (d
);
1336 d
= REAL_VALUE_NEGATE (d
);
1338 case FLOAT_TRUNCATE
:
1339 d
= real_value_truncate (mode
, d
);
1342 /* All this does is change the mode. */
1345 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1352 real_to_target (tmp
, &d
, GET_MODE (op
));
1353 for (i
= 0; i
< 4; i
++)
1355 real_from_target (&d
, tmp
, mode
);
1361 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1364 else if (GET_CODE (op
) == CONST_DOUBLE
1365 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1366 && GET_MODE_CLASS (mode
) == MODE_INT
1367 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1369 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1370 operators are intentionally left unspecified (to ease implementation
1371 by target backends), for consistency, this routine implements the
1372 same semantics for constant folding as used by the middle-end. */
1374 /* This was formerly used only for non-IEEE float.
1375 eggert@twinsun.com says it is safe for IEEE also. */
1376 HOST_WIDE_INT xh
, xl
, th
, tl
;
1377 REAL_VALUE_TYPE x
, t
;
1378 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1382 if (REAL_VALUE_ISNAN (x
))
1385 /* Test against the signed upper bound. */
1386 if (width
> HOST_BITS_PER_WIDE_INT
)
1388 th
= ((unsigned HOST_WIDE_INT
) 1
1389 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1395 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1397 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1398 if (REAL_VALUES_LESS (t
, x
))
1405 /* Test against the signed lower bound. */
1406 if (width
> HOST_BITS_PER_WIDE_INT
)
1408 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1414 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1416 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1417 if (REAL_VALUES_LESS (x
, t
))
1423 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1427 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1430 /* Test against the unsigned upper bound. */
1431 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1436 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1438 th
= ((unsigned HOST_WIDE_INT
) 1
1439 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1445 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1447 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1448 if (REAL_VALUES_LESS (t
, x
))
1455 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1461 return immed_double_const (xl
, xh
, mode
);
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
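
/* For instance (illustrative): with CODE == PLUS,
   (plus:SI (plus:SI (reg:SI 60) (const_int 4)) (const_int 8)) is
   reassociated so that the two constants fold, giving
   (plus:SI (reg:SI 60) (const_int 12)).  */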
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
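
/* Usage sketch (illustrative): simplify_binary_operation (PLUS, SImode,
   GEN_INT (2), GEN_INT (3)) folds to (const_int 5), and adding
   (const_int 0) to a register returns the register itself; if nothing
   applies, the result is NULL_RTX and the caller keeps the original.  */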
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */
      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));
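
      /* For example (illustrative): (plus:SI (symbol_ref:SI "x")
         (const_int 4)) has a constant first operand with a mode, so
         plus_constant wraps the sum as
         (const:SI (plus:SI (symbol_ref:SI "x") (const_int 4))), which
         assemblers can relocate.  */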
1613 /* See if this is something like X * C - X or vice versa or
1614 if the multiplication is written as a shift. If so, we can
1615 distribute and make a new multiply, shift, or maybe just
1616 have X (if C is 2 in the example above). But don't make
1617 something more expensive than we had before. */
1619 if (SCALAR_INT_MODE_P (mode
))
1621 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1622 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1623 rtx lhs
= op0
, rhs
= op1
;
1625 if (GET_CODE (lhs
) == NEG
)
1629 lhs
= XEXP (lhs
, 0);
1631 else if (GET_CODE (lhs
) == MULT
1632 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1634 coeff0l
= INTVAL (XEXP (lhs
, 1));
1635 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1636 lhs
= XEXP (lhs
, 0);
1638 else if (GET_CODE (lhs
) == ASHIFT
1639 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1640 && INTVAL (XEXP (lhs
, 1)) >= 0
1641 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1643 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1645 lhs
= XEXP (lhs
, 0);
1648 if (GET_CODE (rhs
) == NEG
)
1652 rhs
= XEXP (rhs
, 0);
1654 else if (GET_CODE (rhs
) == MULT
1655 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1657 coeff1l
= INTVAL (XEXP (rhs
, 1));
1658 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1659 rhs
= XEXP (rhs
, 0);
1661 else if (GET_CODE (rhs
) == ASHIFT
1662 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1663 && INTVAL (XEXP (rhs
, 1)) >= 0
1664 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1666 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1668 rhs
= XEXP (rhs
, 0);
1671 if (rtx_equal_p (lhs
, rhs
))
1673 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1675 unsigned HOST_WIDE_INT l
;
1678 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1679 coeff
= immed_double_const (l
, h
, mode
);
1681 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1682 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1687 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1688 if ((GET_CODE (op1
) == CONST_INT
1689 || GET_CODE (op1
) == CONST_DOUBLE
)
1690 && GET_CODE (op0
) == XOR
1691 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1692 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1693 && mode_signbit_p (mode
, op1
))
1694 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1695 simplify_gen_binary (XOR
, mode
, op1
,
1698 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1699 if (GET_CODE (op0
) == MULT
1700 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1704 in1
= XEXP (XEXP (op0
, 0), 0);
1705 in2
= XEXP (op0
, 1);
1706 return simplify_gen_binary (MINUS
, mode
, op1
,
1707 simplify_gen_binary (MULT
, mode
,
1711 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1712 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1714 if (COMPARISON_P (op0
)
1715 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1716 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1717 && (reversed
= reversed_comparison (op0
, mode
)))
1719 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1721 /* If one of the operands is a PLUS or a MINUS, see if we can
1722 simplify this by the associative law.
1723 Don't use the associative law for floating point.
1724 The inaccuracy makes it nonassociative,
1725 and subtle programs can break if operations are associated. */
1727 if (INTEGRAL_MODE_P (mode
)
1728 && (plus_minus_operand_p (op0
)
1729 || plus_minus_operand_p (op1
))
1730 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1733 /* Reassociate floating point addition only when the user
1734 specifies unsafe math optimizations. */
1735 if (FLOAT_MODE_P (mode
)
1736 && flag_unsafe_math_optimizations
)
1738 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1746 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1747 using cc0, in which case we want to leave it as a COMPARE
1748 so we can distinguish it from a register-register-copy.
1750 In IEEE floating point, x-0 is not the same as x. */
1752 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1753 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1754 && trueop1
== CONST0_RTX (mode
))
1758 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1759 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1760 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1761 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1763 rtx xop00
= XEXP (op0
, 0);
1764 rtx xop10
= XEXP (op1
, 0);
1767 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1769 if (REG_P (xop00
) && REG_P (xop10
)
1770 && GET_MODE (xop00
) == GET_MODE (xop10
)
1771 && REGNO (xop00
) == REGNO (xop10
)
1772 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1773 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1780 /* We can't assume x-x is 0 even with non-IEEE floating point,
1781 but since it is zero except in very strange circumstances, we
1782 will treat it as zero with -funsafe-math-optimizations. */
1783 if (rtx_equal_p (trueop0
, trueop1
)
1784 && ! side_effects_p (op0
)
1785 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1786 return CONST0_RTX (mode
);
1788 /* Change subtraction from zero into negation. (0 - x) is the
1789 same as -x when x is NaN, infinite, or finite and nonzero.
1790 But if the mode has signed zeros, and does not round towards
1791 -infinity, then 0 - 0 is 0, not -0. */
1792 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1793 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1795 /* (-1 - a) is ~a. */
1796 if (trueop0
== constm1_rtx
)
1797 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1799 /* Subtracting 0 has no effect unless the mode has signed zeros
1800 and supports rounding towards -infinity. In such a case,
1802 if (!(HONOR_SIGNED_ZEROS (mode
)
1803 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1804 && trueop1
== CONST0_RTX (mode
))
1807 /* See if this is something like X * C - X or vice versa or
1808 if the multiplication is written as a shift. If so, we can
1809 distribute and make a new multiply, shift, or maybe just
1810 have X (if C is 2 in the example above). But don't make
1811 something more expensive than we had before. */
1813 if (SCALAR_INT_MODE_P (mode
))
1815 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1816 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1817 rtx lhs
= op0
, rhs
= op1
;
1819 if (GET_CODE (lhs
) == NEG
)
1823 lhs
= XEXP (lhs
, 0);
1825 else if (GET_CODE (lhs
) == MULT
1826 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1828 coeff0l
= INTVAL (XEXP (lhs
, 1));
1829 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1830 lhs
= XEXP (lhs
, 0);
1832 else if (GET_CODE (lhs
) == ASHIFT
1833 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1834 && INTVAL (XEXP (lhs
, 1)) >= 0
1835 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1837 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1839 lhs
= XEXP (lhs
, 0);
1842 if (GET_CODE (rhs
) == NEG
)
1846 rhs
= XEXP (rhs
, 0);
1848 else if (GET_CODE (rhs
) == MULT
1849 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1851 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1852 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1853 rhs
= XEXP (rhs
, 0);
1855 else if (GET_CODE (rhs
) == ASHIFT
1856 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1857 && INTVAL (XEXP (rhs
, 1)) >= 0
1858 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1860 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1862 rhs
= XEXP (rhs
, 0);
1865 if (rtx_equal_p (lhs
, rhs
))
1867 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1869 unsigned HOST_WIDE_INT l
;
1872 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1873 coeff
= immed_double_const (l
, h
, mode
);
1875 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1876 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1881 /* (a - (-b)) -> (a + b). True even for IEEE. */
1882 if (GET_CODE (op1
) == NEG
)
1883 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1885 /* (-x - c) may be simplified as (-c - x). */
1886 if (GET_CODE (op0
) == NEG
1887 && (GET_CODE (op1
) == CONST_INT
1888 || GET_CODE (op1
) == CONST_DOUBLE
))
1890 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1892 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1895 /* Don't let a relocatable value get a negative coeff. */
1896 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1897 return simplify_gen_binary (PLUS
, mode
,
1899 neg_const_int (mode
, op1
));
1901 /* (x - (x & y)) -> (x & ~y) */
1902 if (GET_CODE (op1
) == AND
)
1904 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1906 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1907 GET_MODE (XEXP (op1
, 1)));
1908 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1910 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1912 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1913 GET_MODE (XEXP (op1
, 0)));
1914 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1918 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1919 by reversing the comparison code if valid. */
1920 if (STORE_FLAG_VALUE
== 1
1921 && trueop0
== const1_rtx
1922 && COMPARISON_P (op1
)
1923 && (reversed
= reversed_comparison (op1
, mode
)))
1926 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1927 if (GET_CODE (op1
) == MULT
1928 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1932 in1
= XEXP (XEXP (op1
, 0), 0);
1933 in2
= XEXP (op1
, 1);
1934 return simplify_gen_binary (PLUS
, mode
,
1935 simplify_gen_binary (MULT
, mode
,
1940 /* Canonicalize (minus (neg A) (mult B C)) to
1941 (minus (mult (neg B) C) A). */
1942 if (GET_CODE (op1
) == MULT
1943 && GET_CODE (op0
) == NEG
)
1947 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1948 in2
= XEXP (op1
, 1);
1949 return simplify_gen_binary (MINUS
, mode
,
1950 simplify_gen_binary (MULT
, mode
,
1955 /* If one of the operands is a PLUS or a MINUS, see if we can
1956 simplify this by the associative law. This will, for example,
1957 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1958 Don't use the associative law for floating point.
1959 The inaccuracy makes it nonassociative,
1960 and subtle programs can break if operations are associated. */
1962 if (INTEGRAL_MODE_P (mode
)
1963 && (plus_minus_operand_p (op0
)
1964 || plus_minus_operand_p (op1
))
1965 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1970 if (trueop1
== constm1_rtx
)
1971 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1973 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1974 x is NaN, since x * 0 is then also NaN. Nor is it valid
1975 when the mode has signed zeros, since multiplying a negative
1976 number by 0 will give -0, not 0. */
1977 if (!HONOR_NANS (mode
)
1978 && !HONOR_SIGNED_ZEROS (mode
)
1979 && trueop1
== CONST0_RTX (mode
)
1980 && ! side_effects_p (op0
))
1983 /* In IEEE floating point, x*1 is not equivalent to x for
1985 if (!HONOR_SNANS (mode
)
1986 && trueop1
== CONST1_RTX (mode
))
1989 /* Convert multiply by constant power of two into shift unless
1990 we are still generating RTL. This test is a kludge. */
1991 if (GET_CODE (trueop1
) == CONST_INT
1992 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1993 /* If the mode is larger than the host word size, and the
1994 uppermost bit is set, then this isn't a power of two due
1995 to implicit sign extension. */
1996 && (width
<= HOST_BITS_PER_WIDE_INT
1997 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1998 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2000 /* Likewise for multipliers wider than a word. */
2001 if (GET_CODE (trueop1
) == CONST_DOUBLE
2002 && (GET_MODE (trueop1
) == VOIDmode
2003 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
2004 && GET_MODE (op0
) == mode
2005 && CONST_DOUBLE_LOW (trueop1
) == 0
2006 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
2007 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2008 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2010 /* x*2 is x+x and x*(-1) is -x */
2011 if (GET_CODE (trueop1
) == CONST_DOUBLE
2012 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2013 && GET_MODE (op0
) == mode
)
2016 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2018 if (REAL_VALUES_EQUAL (d
, dconst2
))
2019 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2021 if (!HONOR_SNANS (mode
)
2022 && REAL_VALUES_EQUAL (d
, dconstm1
))
2023 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2026 /* Optimize -x * -x as x * x. */
2027 if (FLOAT_MODE_P (mode
)
2028 && GET_CODE (op0
) == NEG
2029 && GET_CODE (op1
) == NEG
2030 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2031 && !side_effects_p (XEXP (op0
, 0)))
2032 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2034 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2035 if (SCALAR_FLOAT_MODE_P (mode
)
2036 && GET_CODE (op0
) == ABS
2037 && GET_CODE (op1
) == ABS
2038 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2039 && !side_effects_p (XEXP (op0
, 0)))
2040 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2042 /* Reassociate multiplication, but for floating point MULTs
2043 only when the user specifies unsafe math optimizations. */
2044 if (! FLOAT_MODE_P (mode
)
2045 || flag_unsafe_math_optimizations
)
2047 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2054 if (trueop1
== const0_rtx
)
2056 if (GET_CODE (trueop1
) == CONST_INT
2057 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2058 == GET_MODE_MASK (mode
)))
2060 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2062 /* A | (~A) -> -1 */
2063 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2064 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2065 && ! side_effects_p (op0
)
2066 && SCALAR_INT_MODE_P (mode
))
2069 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2070 if (GET_CODE (op1
) == CONST_INT
2071 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2072 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
2075 /* Canonicalize (X & C1) | C2. */
2076 if (GET_CODE (op0
) == AND
2077 && GET_CODE (trueop1
) == CONST_INT
2078 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
)
2080 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2081 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2082 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2084 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2086 && !side_effects_p (XEXP (op0
, 0)))
2089 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2090 if (((c1
|c2
) & mask
) == mask
)
2091 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2093 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2094 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2096 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2097 gen_int_mode (c1
& ~c2
, mode
));
2098 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2102 /* Convert (A & B) | A to A. */
2103 if (GET_CODE (op0
) == AND
2104 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2105 || rtx_equal_p (XEXP (op0
, 1), op1
))
2106 && ! side_effects_p (XEXP (op0
, 0))
2107 && ! side_effects_p (XEXP (op0
, 1)))
2110 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2111 mode size to (rotate A CX). */
2113 if (GET_CODE (op1
) == ASHIFT
2114 || GET_CODE (op1
) == SUBREG
)
2125 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2126 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2127 && GET_CODE (XEXP (opleft
, 1)) == CONST_INT
2128 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2129 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2130 == GET_MODE_BITSIZE (mode
)))
2131 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (INTVAL (XEXP (op0, 1))
						    & ~INTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
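      /* For instance, if nonzero_bits shows that op0 can only have bits
	 within 0xff00 and op1 only bits within 0x00ff, the XOR is
	 rewritten as an IOR, which in turn lets the rotate detection in
	 the IOR case match shift pairs that were encoded with XOR.  */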
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
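      /* Concretely, in SImode (lshiftrt X 31) is 1 exactly when X < 0,
	 so (xor (lshiftrt X 31) 1) is 1 exactly when X >= 0, which is
	 the (ge X 0) built above when STORE_FLAG_VALUE is 1.  */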
      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      /* If we are turning off bits already known off in OP0, we need
	 not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & INTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (trueop1) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT)
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}
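      /* As an example of the canonicalization above, (A | 0x0f) & 0xfc
	 becomes (A & 0xfc) | 0x0c, since 0x0f & 0xfc == 0x0c.  */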
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ~INTVAL (trueop1)
	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
		      == INTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
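      /* A concrete instance of the PLUS/MINUS masking rule above: with
	 M == 0xff, ((A & 0x1ff) + B) & 0xff drops the inner AND and
	 becomes (A + B) & 0xff, because 0x1ff & 0xff == 0xff and the
	 discarded high bits of A cannot influence the low eight bits of
	 the sum.  */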
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -funsafe-math-optimizations.  */
	      if (flag_unsafe_math_optimizations
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode))
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && exact_log2 (INTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
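      /* The UMOD power-of-two rule above turns, for example,
	 (umod X (const_int 8)) into (and X (const_int 7)): for an
	 unsigned value the remainder modulo 8 is just the low three
	 bits.  */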
2564 if (trueop1
== CONST0_RTX (mode
))
2566 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2568 /* Rotating ~0 always results in ~0. */
2569 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
2570 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2571 && ! side_effects_p (op1
))
2577 if (trueop1
== CONST0_RTX (mode
))
2579 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2584 if (trueop1
== CONST0_RTX (mode
))
2586 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && GET_CODE (trueop1) == CONST_INT
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_BITSIZE (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
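      /* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields
	 32 for SImode, (lshiftrt (clz:SI X) (const_int 5)) is 1 precisely
	 when the count is 32, i.e. when X is zero, so it is replaced by
	 (eq X (const_int 0)).  */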
2606 if (width
<= HOST_BITS_PER_WIDE_INT
2607 && GET_CODE (trueop1
) == CONST_INT
2608 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2609 && ! side_effects_p (op0
))
2611 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2613 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2619 if (width
<= HOST_BITS_PER_WIDE_INT
2620 && GET_CODE (trueop1
) == CONST_INT
2621 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2622 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2623 && ! side_effects_p (op0
))
2625 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2627 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2633 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2635 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2637 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2643 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2645 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2647 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2656 /* ??? There are simplifications that can be done. */
2660 if (!VECTOR_MODE_P (mode
))
2662 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2663 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2664 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2665 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2666 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2668 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2669 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2674 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2675 gcc_assert (GET_MODE_INNER (mode
)
2676 == GET_MODE_INNER (GET_MODE (trueop0
)));
2677 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2679 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2681 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2682 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2683 rtvec v
= rtvec_alloc (n_elts
);
2686 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2687 for (i
= 0; i
< n_elts
; i
++)
2689 rtx x
= XVECEXP (trueop1
, 0, i
);
2691 gcc_assert (GET_CODE (x
) == CONST_INT
);
2692 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2696 return gen_rtx_CONST_VECTOR (mode
, v
);
2700 if (XVECLEN (trueop1
, 0) == 1
2701 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2702 && GET_CODE (trueop0
) == VEC_CONCAT
)
2705 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2707 /* Try to find the element in the VEC_CONCAT. */
2708 while (GET_MODE (vec
) != mode
2709 && GET_CODE (vec
) == VEC_CONCAT
)
2711 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2712 if (offset
< vec_size
)
2713 vec
= XEXP (vec
, 0);
2717 vec
= XEXP (vec
, 1);
2719 vec
= avoid_constant_pool_reference (vec
);
2722 if (GET_MODE (vec
) == mode
)
2729 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2730 ? GET_MODE (trueop0
)
2731 : GET_MODE_INNER (mode
));
2732 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2733 ? GET_MODE (trueop1
)
2734 : GET_MODE_INNER (mode
));
2736 gcc_assert (VECTOR_MODE_P (mode
));
2737 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2738 == GET_MODE_SIZE (mode
));
2740 if (VECTOR_MODE_P (op0_mode
))
2741 gcc_assert (GET_MODE_INNER (mode
)
2742 == GET_MODE_INNER (op0_mode
));
2744 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2746 if (VECTOR_MODE_P (op1_mode
))
2747 gcc_assert (GET_MODE_INNER (mode
)
2748 == GET_MODE_INNER (op1_mode
));
2750 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2752 if ((GET_CODE (trueop0
) == CONST_VECTOR
2753 || GET_CODE (trueop0
) == CONST_INT
2754 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2755 && (GET_CODE (trueop1
) == CONST_VECTOR
2756 || GET_CODE (trueop1
) == CONST_INT
2757 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2759 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2760 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2761 rtvec v
= rtvec_alloc (n_elts
);
2763 unsigned in_n_elts
= 1;
2765 if (VECTOR_MODE_P (op0_mode
))
2766 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2767 for (i
= 0; i
< n_elts
; i
++)
2771 if (!VECTOR_MODE_P (op0_mode
))
2772 RTVEC_ELT (v
, i
) = trueop0
;
2774 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2778 if (!VECTOR_MODE_P (op1_mode
))
2779 RTVEC_ELT (v
, i
) = trueop1
;
2781 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2786 return gen_rtx_CONST_VECTOR (mode
, v
);
2799 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2802 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2804 unsigned int width
= GET_MODE_BITSIZE (mode
);
2806 if (VECTOR_MODE_P (mode
)
2807 && code
!= VEC_CONCAT
2808 && GET_CODE (op0
) == CONST_VECTOR
2809 && GET_CODE (op1
) == CONST_VECTOR
)
2811 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2812 enum machine_mode op0mode
= GET_MODE (op0
);
2813 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2814 enum machine_mode op1mode
= GET_MODE (op1
);
2815 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2816 rtvec v
= rtvec_alloc (n_elts
);
2819 gcc_assert (op0_n_elts
== n_elts
);
2820 gcc_assert (op1_n_elts
== n_elts
);
2821 for (i
= 0; i
< n_elts
; i
++)
2823 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2824 CONST_VECTOR_ELT (op0
, i
),
2825 CONST_VECTOR_ELT (op1
, i
));
2828 RTVEC_ELT (v
, i
) = x
;
2831 return gen_rtx_CONST_VECTOR (mode
, v
);
2834 if (VECTOR_MODE_P (mode
)
2835 && code
== VEC_CONCAT
2836 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2838 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2839 rtvec v
= rtvec_alloc (n_elts
);
2841 gcc_assert (n_elts
>= 2);
2844 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2845 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2847 RTVEC_ELT (v
, 0) = op0
;
2848 RTVEC_ELT (v
, 1) = op1
;
2852 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2853 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2856 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2857 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2858 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2860 for (i
= 0; i
< op0_n_elts
; ++i
)
2861 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2862 for (i
= 0; i
< op1_n_elts
; ++i
)
2863 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2866 return gen_rtx_CONST_VECTOR (mode
, v
);
2869 if (SCALAR_FLOAT_MODE_P (mode
)
2870 && GET_CODE (op0
) == CONST_DOUBLE
2871 && GET_CODE (op1
) == CONST_DOUBLE
2872 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2883 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2885 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2887 for (i
= 0; i
< 4; i
++)
2904 real_from_target (&r
, tmp0
, mode
);
2905 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2909 REAL_VALUE_TYPE f0
, f1
, value
, result
;
2912 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
2913 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
2914 real_convert (&f0
, mode
, &f0
);
2915 real_convert (&f1
, mode
, &f1
);
2917 if (HONOR_SNANS (mode
)
2918 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
2922 && REAL_VALUES_EQUAL (f1
, dconst0
)
2923 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
2926 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2927 && flag_trapping_math
2928 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
2930 int s0
= REAL_VALUE_NEGATIVE (f0
);
2931 int s1
= REAL_VALUE_NEGATIVE (f1
);
2936 /* Inf + -Inf = NaN plus exception. */
2941 /* Inf - Inf = NaN plus exception. */
2946 /* Inf / Inf = NaN plus exception. */
2953 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2954 && flag_trapping_math
2955 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
2956 || (REAL_VALUE_ISINF (f1
)
2957 && REAL_VALUES_EQUAL (f0
, dconst0
))))
2958 /* Inf * 0 = NaN plus exception. */
2961 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
2963 real_convert (&result
, mode
, &value
);
2965 /* Don't constant fold this floating point operation if
2966 the result has overflowed and flag_trapping_math. */
2968 if (flag_trapping_math
2969 && MODE_HAS_INFINITIES (mode
)
2970 && REAL_VALUE_ISINF (result
)
2971 && !REAL_VALUE_ISINF (f0
)
2972 && !REAL_VALUE_ISINF (f1
))
2973 /* Overflow plus exception. */
2976 /* Don't constant fold this floating point operation if the
2977 result may dependent upon the run-time rounding mode and
2978 flag_rounding_math is set, or if GCC's software emulation
2979 is unable to accurately represent the result. */
2981 if ((flag_rounding_math
2982 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
2983 && !flag_unsafe_math_optimizations
))
2984 && (inexact
|| !real_identical (&result
, &value
)))
2987 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
2991 /* We can fold some multi-word operations. */
2992 if (GET_MODE_CLASS (mode
) == MODE_INT
2993 && width
== HOST_BITS_PER_WIDE_INT
* 2
2994 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2995 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2997 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2998 HOST_WIDE_INT h1
, h2
, hv
, ht
;
3000 if (GET_CODE (op0
) == CONST_DOUBLE
)
3001 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
3003 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
3005 if (GET_CODE (op1
) == CONST_DOUBLE
)
3006 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
3008 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
3013 /* A - B == A + (-B). */
3014 neg_double (l2
, h2
, &lv
, &hv
);
3017 /* Fall through.... */
3020 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3024 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3028 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3029 &lv
, &hv
, <
, &ht
))
3034 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3035 <
, &ht
, &lv
, &hv
))
3040 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3041 &lv
, &hv
, <
, &ht
))
3046 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3047 <
, &ht
, &lv
, &hv
))
3052 lv
= l1
& l2
, hv
= h1
& h2
;
3056 lv
= l1
| l2
, hv
= h1
| h2
;
3060 lv
= l1
^ l2
, hv
= h1
^ h2
;
3066 && ((unsigned HOST_WIDE_INT
) l1
3067 < (unsigned HOST_WIDE_INT
) l2
)))
3076 && ((unsigned HOST_WIDE_INT
) l1
3077 > (unsigned HOST_WIDE_INT
) l2
)))
3084 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
3086 && ((unsigned HOST_WIDE_INT
) l1
3087 < (unsigned HOST_WIDE_INT
) l2
)))
3094 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
3096 && ((unsigned HOST_WIDE_INT
) l1
3097 > (unsigned HOST_WIDE_INT
) l2
)))
3103 case LSHIFTRT
: case ASHIFTRT
:
3105 case ROTATE
: case ROTATERT
:
3106 if (SHIFT_COUNT_TRUNCATED
)
3107 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
3109 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
3112 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3113 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
3115 else if (code
== ASHIFT
)
3116 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
3117 else if (code
== ROTATE
)
3118 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3119 else /* code == ROTATERT */
3120 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3127 return immed_double_const (lv
, hv
, mode
);
3130 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3131 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3133 /* Get the integer argument values in two forms:
3134 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3136 arg0
= INTVAL (op0
);
3137 arg1
= INTVAL (op1
);
3139 if (width
< HOST_BITS_PER_WIDE_INT
)
3141 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3142 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3145 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3146 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3149 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3150 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3158 /* Compute the value of the arithmetic. */
3163 val
= arg0s
+ arg1s
;
3167 val
= arg0s
- arg1s
;
3171 val
= arg0s
* arg1s
;
3176 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3179 val
= arg0s
/ arg1s
;
3184 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3187 val
= arg0s
% arg1s
;
3192 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3195 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3200 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3203 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3221 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3222 the value is in range. We can't return any old value for
3223 out-of-range arguments because either the middle-end (via
3224 shift_truncation_mask) or the back-end might be relying on
3225 target-specific knowledge. Nor can we rely on
3226 shift_truncation_mask, since the shift might not be part of an
3227 ashlM3, lshrM3 or ashrM3 instruction. */
3228 if (SHIFT_COUNT_TRUNCATED
)
3229 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3230 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3233 val
= (code
== ASHIFT
3234 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3235 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3237 /* Sign-extend the result for arithmetic right shifts. */
3238 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3239 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3247 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3248 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3256 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3257 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3261 /* Do nothing here. */
3265 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3269 val
= ((unsigned HOST_WIDE_INT
) arg0
3270 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3274 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3278 val
= ((unsigned HOST_WIDE_INT
) arg0
3279 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3287 /* ??? There are simplifications that can be done. */
3294 return gen_int_mode (val
, mode
);
3302 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3305 Rather than test for specific case, we do this by a brute-force method
3306 and do all possible simplifications until no more changes occur. Then
3307 we rebuild the operation. */
3309 struct simplify_plus_minus_op_data
3316 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
3318 const struct simplify_plus_minus_op_data
*d1
= p1
;
3319 const struct simplify_plus_minus_op_data
*d2
= p2
;
3322 result
= (commutative_operand_precedence (d2
->op
)
3323 - commutative_operand_precedence (d1
->op
));
3327 /* Group together equal REGs to do more simplification. */
3328 if (REG_P (d1
->op
) && REG_P (d2
->op
))
3329 return REGNO (d1
->op
) - REGNO (d2
->op
);
3335 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3338 struct simplify_plus_minus_op_data ops
[8];
3340 int n_ops
= 2, input_ops
= 2;
3341 int changed
, n_constants
= 0, canonicalized
= 0;
3344 memset (ops
, 0, sizeof ops
);
3346 /* Set up the two operands and then expand them until nothing has been
3347 changed. If we run out of room in our array, give up; this should
3348 almost never happen. */
3353 ops
[1].neg
= (code
== MINUS
);
3359 for (i
= 0; i
< n_ops
; i
++)
3361 rtx this_op
= ops
[i
].op
;
3362 int this_neg
= ops
[i
].neg
;
3363 enum rtx_code this_code
= GET_CODE (this_op
);
3372 ops
[n_ops
].op
= XEXP (this_op
, 1);
3373 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3376 ops
[i
].op
= XEXP (this_op
, 0);
3379 canonicalized
|= this_neg
;
3383 ops
[i
].op
= XEXP (this_op
, 0);
3384 ops
[i
].neg
= ! this_neg
;
3391 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3392 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3393 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3395 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3396 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3397 ops
[n_ops
].neg
= this_neg
;
3405 /* ~a -> (-a - 1) */
3408 ops
[n_ops
].op
= constm1_rtx
;
3409 ops
[n_ops
++].neg
= this_neg
;
3410 ops
[i
].op
= XEXP (this_op
, 0);
3411 ops
[i
].neg
= !this_neg
;
3421 ops
[i
].op
= neg_const_int (mode
, this_op
);
3435 if (n_constants
> 1)
3438 gcc_assert (n_ops
>= 2);
3440 /* If we only have two operands, we can avoid the loops. */
3443 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3446 /* Get the two operands. Be careful with the order, especially for
3447 the cases where code == MINUS. */
3448 if (ops
[0].neg
&& ops
[1].neg
)
3450 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3453 else if (ops
[0].neg
)
3464 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3467 /* Now simplify each pair of operands until nothing changes. */
3470 /* Insertion sort is good enough for an eight-element array. */
3471 for (i
= 1; i
< n_ops
; i
++)
3473 struct simplify_plus_minus_op_data save
;
3475 if (simplify_plus_minus_op_data_cmp (&ops
[j
], &ops
[i
]) < 0)
3481 ops
[j
+ 1] = ops
[j
];
3482 while (j
-- && simplify_plus_minus_op_data_cmp (&ops
[j
], &save
) > 0);
3486 /* This is only useful the first time through. */
3491 for (i
= n_ops
- 1; i
> 0; i
--)
3492 for (j
= i
- 1; j
>= 0; j
--)
3494 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3495 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3497 if (lhs
!= 0 && rhs
!= 0)
3499 enum rtx_code ncode
= PLUS
;
3505 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3507 else if (swap_commutative_operands_p (lhs
, rhs
))
3508 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3510 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3511 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3513 rtx tem_lhs
, tem_rhs
;
3515 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3516 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3517 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3519 if (tem
&& !CONSTANT_P (tem
))
3520 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3523 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3525 /* Reject "simplifications" that just wrap the two
3526 arguments in a CONST. Failure to do so can result
3527 in infinite recursion with simplify_binary_operation
3528 when it calls us to simplify CONST operations. */
3530 && ! (GET_CODE (tem
) == CONST
3531 && GET_CODE (XEXP (tem
, 0)) == ncode
3532 && XEXP (XEXP (tem
, 0), 0) == lhs
3533 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3536 if (GET_CODE (tem
) == NEG
)
3537 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3538 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3539 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3543 ops
[j
].op
= NULL_RTX
;
3549 /* Pack all the operands to the lower-numbered entries. */
3550 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3560 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3562 && GET_CODE (ops
[1].op
) == CONST_INT
3563 && CONSTANT_P (ops
[0].op
)
3565 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3567 /* We suppressed creation of trivial CONST expressions in the
3568 combination loop to avoid recursion. Create one manually now.
3569 The combination loop should have ensured that there is exactly
3570 one CONST_INT, and the sort will have ensured that it is last
3571 in the array and that any other constant will be next-to-last. */
3574 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3575 && CONSTANT_P (ops
[n_ops
- 2].op
))
3577 rtx value
= ops
[n_ops
- 1].op
;
3578 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3579 value
= neg_const_int (mode
, value
);
3580 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3584 /* Put a non-negated operand first, if possible. */
3586 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3589 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3598 /* Now make the result by performing the requested operations. */
3600 for (i
= 1; i
< n_ops
; i
++)
3601 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3602 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */

static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
3619 /* Like simplify_binary_operation except used for relational operators.
3620 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3621 not also be VOIDmode.
3623 CMP_MODE specifies in which mode the comparison is done in, so it is
3624 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3625 the operands or, if both are VOIDmode, the operands are compared in
3626 "infinite precision". */
3628 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3629 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3631 rtx tem
, trueop0
, trueop1
;
3633 if (cmp_mode
== VOIDmode
)
3634 cmp_mode
= GET_MODE (op0
);
3635 if (cmp_mode
== VOIDmode
)
3636 cmp_mode
= GET_MODE (op1
);
3638 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3641 if (SCALAR_FLOAT_MODE_P (mode
))
3643 if (tem
== const0_rtx
)
3644 return CONST0_RTX (mode
);
3645 #ifdef FLOAT_STORE_FLAG_VALUE
3647 REAL_VALUE_TYPE val
;
3648 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3649 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3655 if (VECTOR_MODE_P (mode
))
3657 if (tem
== const0_rtx
)
3658 return CONST0_RTX (mode
);
3659 #ifdef VECTOR_STORE_FLAG_VALUE
3664 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3665 if (val
== NULL_RTX
)
3667 if (val
== const1_rtx
)
3668 return CONST1_RTX (mode
);
3670 units
= GET_MODE_NUNITS (mode
);
3671 v
= rtvec_alloc (units
);
3672 for (i
= 0; i
< units
; i
++)
3673 RTVEC_ELT (v
, i
) = val
;
3674 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3684 /* For the following tests, ensure const0_rtx is op1. */
3685 if (swap_commutative_operands_p (op0
, op1
)
3686 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3687 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3689 /* If op0 is a compare, extract the comparison arguments from it. */
3690 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3691 return simplify_relational_operation (code
, mode
, VOIDmode
,
3692 XEXP (op0
, 0), XEXP (op0
, 1));
3694 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3698 trueop0
= avoid_constant_pool_reference (op0
);
3699 trueop1
= avoid_constant_pool_reference (op1
);
3700 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3704 /* This part of simplify_relational_operation is only used when CMP_MODE
3705 is not in class MODE_CC (i.e. it is a real comparison).
3707 MODE is the mode of the result, while CMP_MODE specifies in which
3708 mode the comparison is done in, so it is the mode of the operands. */
3711 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3712 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3714 enum rtx_code op0code
= GET_CODE (op0
);
3716 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
3718 /* If op0 is a comparison, extract the comparison arguments
3722 if (GET_MODE (op0
) == mode
)
3723 return simplify_rtx (op0
);
3725 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3726 XEXP (op0
, 0), XEXP (op0
, 1));
3728 else if (code
== EQ
)
3730 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3731 if (new_code
!= UNKNOWN
)
3732 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3733 XEXP (op0
, 0), XEXP (op0
, 1));
3737 if (op1
== const0_rtx
)
3739 /* Canonicalize (GTU x 0) as (NE x 0). */
3741 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
3742 /* Canonicalize (LEU x 0) as (EQ x 0). */
3744 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
3746 else if (op1
== const1_rtx
)
3751 /* Canonicalize (GE x 1) as (GT x 0). */
3752 return simplify_gen_relational (GT
, mode
, cmp_mode
,
3755 /* Canonicalize (GEU x 1) as (NE x 0). */
3756 return simplify_gen_relational (NE
, mode
, cmp_mode
,
3759 /* Canonicalize (LT x 1) as (LE x 0). */
3760 return simplify_gen_relational (LE
, mode
, cmp_mode
,
3763 /* Canonicalize (LTU x 1) as (EQ x 0). */
3764 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
3770 else if (op1
== constm1_rtx
)
3772 /* Canonicalize (LE x -1) as (LT x 0). */
3774 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
3775 /* Canonicalize (GT x -1) as (GE x 0). */
3777 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
3780 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3781 if ((code
== EQ
|| code
== NE
)
3782 && (op0code
== PLUS
|| op0code
== MINUS
)
3784 && CONSTANT_P (XEXP (op0
, 1))
3785 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3787 rtx x
= XEXP (op0
, 0);
3788 rtx c
= XEXP (op0
, 1);
3790 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3792 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3795 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3796 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3798 && op1
== const0_rtx
3799 && GET_MODE_CLASS (mode
) == MODE_INT
3800 && cmp_mode
!= VOIDmode
3801 /* ??? Work-around BImode bugs in the ia64 backend. */
3803 && cmp_mode
!= BImode
3804 && nonzero_bits (op0
, cmp_mode
) == 1
3805 && STORE_FLAG_VALUE
== 1)
3806 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3807 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3808 : lowpart_subreg (mode
, op0
, cmp_mode
);
3810 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3811 if ((code
== EQ
|| code
== NE
)
3812 && op1
== const0_rtx
3814 return simplify_gen_relational (code
, mode
, cmp_mode
,
3815 XEXP (op0
, 0), XEXP (op0
, 1));
3817 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3818 if ((code
== EQ
|| code
== NE
)
3820 && rtx_equal_p (XEXP (op0
, 0), op1
)
3821 && !side_effects_p (XEXP (op0
, 0)))
3822 return simplify_gen_relational (code
, mode
, cmp_mode
,
3823 XEXP (op0
, 1), const0_rtx
);
3825 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3826 if ((code
== EQ
|| code
== NE
)
3828 && rtx_equal_p (XEXP (op0
, 1), op1
)
3829 && !side_effects_p (XEXP (op0
, 1)))
3830 return simplify_gen_relational (code
, mode
, cmp_mode
,
3831 XEXP (op0
, 0), const0_rtx
);
3833 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3834 if ((code
== EQ
|| code
== NE
)
3836 && (GET_CODE (op1
) == CONST_INT
3837 || GET_CODE (op1
) == CONST_DOUBLE
)
3838 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3839 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3840 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3841 simplify_gen_binary (XOR
, cmp_mode
,
3842 XEXP (op0
, 1), op1
));
3844 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
3850 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3851 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
3852 XEXP (op0
, 0), const0_rtx
);
3857 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3858 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
3859 XEXP (op0
, 0), const0_rtx
);
3868 /* Check if the given comparison (done in the given MODE) is actually a
3869 tautology or a contradiction.
3870 If no simplification is possible, this function returns zero.
3871 Otherwise, it returns either const_true_rtx or const0_rtx. */
3874 simplify_const_relational_operation (enum rtx_code code
,
3875 enum machine_mode mode
,
3878 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3883 gcc_assert (mode
!= VOIDmode
3884 || (GET_MODE (op0
) == VOIDmode
3885 && GET_MODE (op1
) == VOIDmode
));
3887 /* If op0 is a compare, extract the comparison arguments from it. */
3888 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3890 op1
= XEXP (op0
, 1);
3891 op0
= XEXP (op0
, 0);
3893 if (GET_MODE (op0
) != VOIDmode
)
3894 mode
= GET_MODE (op0
);
3895 else if (GET_MODE (op1
) != VOIDmode
)
3896 mode
= GET_MODE (op1
);
3901 /* We can't simplify MODE_CC values since we don't know what the
3902 actual comparison is. */
3903 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3906 /* Make sure the constant is second. */
3907 if (swap_commutative_operands_p (op0
, op1
))
3909 tem
= op0
, op0
= op1
, op1
= tem
;
3910 code
= swap_condition (code
);
3913 trueop0
= avoid_constant_pool_reference (op0
);
3914 trueop1
= avoid_constant_pool_reference (op1
);
3916 /* For integer comparisons of A and B maybe we can simplify A - B and can
3917 then simplify a comparison of that with zero. If A and B are both either
3918 a register or a CONST_INT, this can't help; testing for these cases will
3919 prevent infinite recursion here and speed things up.
3921 We can only do this for EQ and NE comparisons as otherwise we may
3922 lose or introduce overflow which we cannot disregard as undefined as
3923 we do not know the signedness of the operation on either the left or
3924 the right hand side of the comparison. */
3926 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3927 && (code
== EQ
|| code
== NE
)
3928 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3929 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3930 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3931 /* We cannot do this if tem is a nonzero address. */
3932 && ! nonzero_address_p (tem
))
3933 return simplify_const_relational_operation (signed_condition (code
),
3934 mode
, tem
, const0_rtx
);
3936 if (! HONOR_NANS (mode
) && code
== ORDERED
)
3937 return const_true_rtx
;
3939 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
3942 /* For modes without NaNs, if the two operands are equal, we know the
3943 result except if they have side-effects. */
3944 if (! HONOR_NANS (GET_MODE (trueop0
))
3945 && rtx_equal_p (trueop0
, trueop1
)
3946 && ! side_effects_p (trueop0
))
3947 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3949 /* If the operands are floating-point constants, see if we can fold
3951 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3952 && GET_CODE (trueop1
) == CONST_DOUBLE
3953 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3955 REAL_VALUE_TYPE d0
, d1
;
3957 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3958 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3960 /* Comparisons are unordered iff at least one of the values is NaN. */
3961 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3971 return const_true_rtx
;
3984 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3985 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3986 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3989 /* Otherwise, see if the operands are both integers. */
3990 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3991 && (GET_CODE (trueop0
) == CONST_DOUBLE
3992 || GET_CODE (trueop0
) == CONST_INT
)
3993 && (GET_CODE (trueop1
) == CONST_DOUBLE
3994 || GET_CODE (trueop1
) == CONST_INT
))
3996 int width
= GET_MODE_BITSIZE (mode
);
3997 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3998 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4000 /* Get the two words comprising each integer constant. */
4001 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
4003 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4004 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4008 l0u
= l0s
= INTVAL (trueop0
);
4009 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4012 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4014 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4015 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4019 l1u
= l1s
= INTVAL (trueop1
);
4020 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4023 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4024 we have to sign or zero-extend the values. */
4025 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4027 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4028 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4030 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4031 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4033 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4034 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4036 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4037 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4039 equal
= (h0u
== h1u
&& l0u
== l1u
);
4040 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
4041 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
4042 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
4043 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
4046 /* Otherwise, there are some code-specific tests we can make. */
4049 /* Optimize comparisons with upper and lower bounds. */
4050 if (SCALAR_INT_MODE_P (mode
)
4051 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
4064 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
4071 /* x >= min is always true. */
4072 if (rtx_equal_p (trueop1
, mmin
))
4073 tem
= const_true_rtx
;
4079 /* x <= max is always true. */
4080 if (rtx_equal_p (trueop1
, mmax
))
4081 tem
= const_true_rtx
;
4086 /* x > max is always false. */
4087 if (rtx_equal_p (trueop1
, mmax
))
4093 /* x < min is always false. */
4094 if (rtx_equal_p (trueop1
, mmin
))
4101 if (tem
== const0_rtx
4102 || tem
== const_true_rtx
)
4109 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
4114 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
4115 return const_true_rtx
;
4119 /* Optimize abs(x) < 0.0. */
4120 if (trueop1
== CONST0_RTX (mode
)
4121 && !HONOR_SNANS (mode
)
4122 && (!INTEGRAL_MODE_P (mode
)
4123 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4125 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
4127 if (GET_CODE (tem
) == ABS
)
4129 if (INTEGRAL_MODE_P (mode
)
4130 && (issue_strict_overflow_warning
4131 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4132 warning (OPT_Wstrict_overflow
,
4133 ("assuming signed overflow does not occur when "
4134 "assuming abs (x) < 0 is false"));
4139 /* Optimize popcount (x) < 0. */
4140 if (GET_CODE (trueop0
) == POPCOUNT
&& trueop1
== const0_rtx
)
4141 return const_true_rtx
;
4145 /* Optimize abs(x) >= 0.0. */
4146 if (trueop1
== CONST0_RTX (mode
)
4147 && !HONOR_NANS (mode
)
4148 && (!INTEGRAL_MODE_P (mode
)
4149 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4151 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
4153 if (GET_CODE (tem
) == ABS
)
4155 if (INTEGRAL_MODE_P (mode
)
4156 && (issue_strict_overflow_warning
4157 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4158 warning (OPT_Wstrict_overflow
,
4159 ("assuming signed overflow does not occur when "
4160 "assuming abs (x) >= 0 is true"));
4161 return const_true_rtx
;
4165 /* Optimize popcount (x) >= 0. */
4166 if (GET_CODE (trueop0
) == POPCOUNT
&& trueop1
== const0_rtx
)
4167 return const_true_rtx
;
4171 /* Optimize ! (abs(x) < 0.0). */
4172 if (trueop1
== CONST0_RTX (mode
))
4174 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
4176 if (GET_CODE (tem
) == ABS
)
4177 return const_true_rtx
;
4188 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4194 return equal
? const_true_rtx
: const0_rtx
;
4197 return ! equal
? const_true_rtx
: const0_rtx
;
4200 return op0lt
? const_true_rtx
: const0_rtx
;
4203 return op1lt
? const_true_rtx
: const0_rtx
;
4205 return op0ltu
? const_true_rtx
: const0_rtx
;
4207 return op1ltu
? const_true_rtx
: const0_rtx
;
4210 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
4213 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
4215 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
4217 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
4219 return const_true_rtx
;
4227 /* Simplify CODE, an operation with result mode MODE and three operands,
4228 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4229 a constant. Return 0 if no simplifications is possible. */
4232 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
4233 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
4236 unsigned int width
= GET_MODE_BITSIZE (mode
);
4238 /* VOIDmode means "infinite" precision. */
4240 width
= HOST_BITS_PER_WIDE_INT
;
4246 if (GET_CODE (op0
) == CONST_INT
4247 && GET_CODE (op1
) == CONST_INT
4248 && GET_CODE (op2
) == CONST_INT
4249 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
4250 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
4252 /* Extracting a bit-field from a constant */
4253 HOST_WIDE_INT val
= INTVAL (op0
);
4255 if (BITS_BIG_ENDIAN
)
4256 val
>>= (GET_MODE_BITSIZE (op0_mode
)
4257 - INTVAL (op2
) - INTVAL (op1
));
4259 val
>>= INTVAL (op2
);
4261 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4263 /* First zero-extend. */
4264 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4265 /* If desired, propagate sign bit. */
4266 if (code
== SIGN_EXTRACT
4267 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4268 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4271 /* Clear the bits that don't belong in our mode,
4272 unless they and our sign bit are all one.
4273 So we get either a reasonable negative value or a reasonable
4274 unsigned value for this mode. */
4275 if (width
< HOST_BITS_PER_WIDE_INT
4276 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4277 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4278 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4280 return gen_int_mode (val
, mode
);
4285 if (GET_CODE (op0
) == CONST_INT
)
4286 return op0
!= const0_rtx
? op1
: op2
;
4288 /* Convert c ? a : a into "a". */
4289 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
4292 /* Convert a != b ? a : b into "a". */
4293 if (GET_CODE (op0
) == NE
4294 && ! side_effects_p (op0
)
4295 && ! HONOR_NANS (mode
)
4296 && ! HONOR_SIGNED_ZEROS (mode
)
4297 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4298 && rtx_equal_p (XEXP (op0
, 1), op2
))
4299 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4300 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4303 /* Convert a == b ? a : b into "b". */
4304 if (GET_CODE (op0
) == EQ
4305 && ! side_effects_p (op0
)
4306 && ! HONOR_NANS (mode
)
4307 && ! HONOR_SIGNED_ZEROS (mode
)
4308 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4309 && rtx_equal_p (XEXP (op0
, 1), op2
))
4310 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4311 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4314 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
4316 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
4317 ? GET_MODE (XEXP (op0
, 1))
4318 : GET_MODE (XEXP (op0
, 0)));
4321 /* Look for happy constants in op1 and op2. */
4322 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
4324 HOST_WIDE_INT t
= INTVAL (op1
);
4325 HOST_WIDE_INT f
= INTVAL (op2
);
4327 if (t
== STORE_FLAG_VALUE
&& f
== 0)
4328 code
= GET_CODE (op0
);
4329 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
4332 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4340 return simplify_gen_relational (code
, mode
, cmp_mode
,
4341 XEXP (op0
, 0), XEXP (op0
, 1));
4344 if (cmp_mode
== VOIDmode
)
4345 cmp_mode
= op0_mode
;
4346 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4347 cmp_mode
, XEXP (op0
, 0),
4350 /* See if any simplifications were possible. */
4353 if (GET_CODE (temp
) == CONST_INT
)
4354 return temp
== const0_rtx
? op2
: op1
;
4356 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4362 gcc_assert (GET_MODE (op0
) == mode
);
4363 gcc_assert (GET_MODE (op1
) == mode
);
4364 gcc_assert (VECTOR_MODE_P (mode
));
4365 op2
= avoid_constant_pool_reference (op2
);
4366 if (GET_CODE (op2
) == CONST_INT
)
4368 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4369 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4370 int mask
= (1 << n_elts
) - 1;
4372 if (!(INTVAL (op2
) & mask
))
4374 if ((INTVAL (op2
) & mask
) == mask
)
4377 op0
= avoid_constant_pool_reference (op0
);
4378 op1
= avoid_constant_pool_reference (op1
);
4379 if (GET_CODE (op0
) == CONST_VECTOR
4380 && GET_CODE (op1
) == CONST_VECTOR
)
4382 rtvec v
= rtvec_alloc (n_elts
);
4385 for (i
= 0; i
< n_elts
; i
++)
4386 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4387 ? CONST_VECTOR_ELT (op0
, i
)
4388 : CONST_VECTOR_ELT (op1
, i
));
4389 return gen_rtx_CONST_VECTOR (mode
, v
);
4401 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4402 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4404 Works by unpacking OP into a collection of 8-bit values
4405 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4406 and then repacking them again for OUTERMODE. */
4409 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
4410 enum machine_mode innermode
, unsigned int byte
)
4412 /* We support up to 512-bit values (for V8DFmode). */
4416 value_mask
= (1 << value_bit
) - 1
4418 unsigned char value
[max_bitsize
/ value_bit
];
4427 rtvec result_v
= NULL
;
4428 enum mode_class outer_class
;
4429 enum machine_mode outer_submode
;
4431 /* Some ports misuse CCmode. */
4432 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
4435 /* We have no way to represent a complex constant at the rtl level. */
4436 if (COMPLEX_MODE_P (outermode
))
4439 /* Unpack the value. */
4441 if (GET_CODE (op
) == CONST_VECTOR
)
4443 num_elem
= CONST_VECTOR_NUNITS (op
);
4444 elems
= &CONST_VECTOR_ELT (op
, 0);
4445 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4451 elem_bitsize
= max_bitsize
;
4453 /* If this asserts, it is too complicated; reducing value_bit may help. */
4454 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4455 /* I don't know how to handle endianness of sub-units. */
4456 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
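  /* Worked example (sizes invented for this comment): for (subreg:HI x:SI 2)
     on a big-endian target with 4-byte words, ibyte = 4 - 2 - 2 = 0, so BYTE
     is renumbered to 0 and VALUE_START points at the least-significant
     bytes, which is indeed the low part on such a target.  */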
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
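  /* For instance (constant invented for this comment), a request for
     (subreg:QI (const_int 258) 0) with SImode as INNERMODE folds through
     simplify_immed_subreg to (const_int 2) on a little-endian target.  */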
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
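  /* The block above, for example, collapses
     (subreg:QI (subreg:HI (reg:SI r) 0) 0) into (subreg:QI (reg:SI r) 0)
     by adding the two byte offsets (a little-endian illustration).  */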
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
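  /* e.g. the lowpart (subreg:QI (truncate:HI (reg:SI r)) 0) becomes
     (truncate:QI (reg:SI r)) (little-endian illustration).  */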
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
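  /* Illustration with invented register numbers: if hard register 10 holds
     a DImode value as two SImode words and register 11 is the second word,
     (subreg:SI (reg:DI 10) 4) can fold to (reg:SI 11) when
     subreg_regno_offset reports an offset of one register for that byte.  */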
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
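  /* e.g. (subreg:QI (mem:SI A) 3) becomes a QImode MEM whose address is A
     advanced by 3 bytes, courtesy of adjust_address_nv (illustrative).  */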
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
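  /* e.g. with 4-byte SFmode parts, (subreg:SF (concat:SC RE IM) 4) selects
     IM with offset 0 and simplifies to IM itself (illustrative sizes).  */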
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
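  /* Two illustrations: (subreg:HI (zero_extend:SI (reg:HI r)) 0) is just
     (reg:HI r), while (subreg:HI (zero_extend:SI (reg:QI r)) 2) on a
     little-endian target reads only bits above the QImode source and
     folds to (const_int 0).  */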
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
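/* Callers typically use this as, e.g.,
   simplify_gen_subreg (QImode, x, SImode, 0): the result is either a
   simplified rtx, a fresh (subreg:QI x 0), or NULL_RTX when no valid
   SUBREG can be formed (illustrative modes).  */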
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))