1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
26 #include "coretypes.h"
32 #include "hard-reg-set.h"
35 #include "insn-config.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
53 static rtx
neg_const_int (enum machine_mode
, rtx
);
54 static bool plus_minus_operand_p (rtx
);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
57 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
59 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
61 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
62 enum machine_mode
, rtx
, rtx
);
63 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
64 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
70 neg_const_int (enum machine_mode mode
, rtx i
)
72 return gen_int_mode (- INTVAL (i
), mode
);
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
79 mode_signbit_p (enum machine_mode mode
, rtx x
)
81 unsigned HOST_WIDE_INT val
;
84 if (GET_MODE_CLASS (mode
) != MODE_INT
)
87 width
= GET_MODE_BITSIZE (mode
);
91 if (width
<= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x
) == CONST_INT
)
94 else if (width
<= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x
) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x
) == 0)
98 val
= CONST_DOUBLE_HIGH (x
);
99 width
-= HOST_BITS_PER_WIDE_INT
;
104 if (width
< HOST_BITS_PER_WIDE_INT
)
105 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
106 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
113 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
118 /* If this simplifies, do it. */
119 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0
, op1
))
126 tem
= op0
, op0
= op1
, op1
= tem
;
128 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
134 avoid_constant_pool_reference (rtx x
)
137 enum machine_mode cmode
;
138 HOST_WIDE_INT offset
= 0;
140 switch (GET_CODE (x
))
146 /* Handle float extensions of constant pool references. */
148 c
= avoid_constant_pool_reference (tmp
);
149 if (c
!= tmp
&& GET_CODE (c
) == CONST_DOUBLE
)
153 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
164 /* Call target hook to avoid the effects of -fpic etc.... */
165 addr
= targetm
.delegitimize_address (addr
);
167 /* Split the address into a base and integer offset. */
168 if (GET_CODE (addr
) == CONST
169 && GET_CODE (XEXP (addr
, 0)) == PLUS
170 && GET_CODE (XEXP (XEXP (addr
, 0), 1)) == CONST_INT
)
172 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
173 addr
= XEXP (XEXP (addr
, 0), 0);
176 if (GET_CODE (addr
) == LO_SUM
)
177 addr
= XEXP (addr
, 1);
179 /* If this is a constant pool reference, we can turn it into its
180 constant and hope that simplifications happen. */
181 if (GET_CODE (addr
) == SYMBOL_REF
182 && CONSTANT_POOL_ADDRESS_P (addr
))
184 c
= get_pool_constant (addr
);
185 cmode
= get_pool_mode (addr
);
187 /* If we're accessing the constant in a different mode than it was
188 originally stored, attempt to fix that up via subreg simplifications.
189 If that fails we have no choice but to return the original memory. */
190 if (offset
!= 0 || cmode
!= GET_MODE (x
))
192 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
193 if (tem
&& CONSTANT_P (tem
))
203 /* Return true if X is a MEM referencing the constant pool. */
206 constant_pool_reference_p (rtx x
)
208 return avoid_constant_pool_reference (x
) != x
;
211 /* Make a unary operation by first seeing if it folds and otherwise making
212 the specified operation. */
215 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
216 enum machine_mode op_mode
)
220 /* If this simplifies, use it. */
221 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
224 return gen_rtx_fmt_e (code
, mode
, op
);
227 /* Likewise for ternary operations. */
230 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
231 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
235 /* If this simplifies, use it. */
236 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
240 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
243 /* Likewise, for relational operations.
244 CMP_MODE specifies mode comparison is done in. */
247 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
248 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
252 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
256 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
259 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
263 simplify_replace_rtx (rtx x
, rtx old_rtx
, rtx new_rtx
)
265 enum rtx_code code
= GET_CODE (x
);
266 enum machine_mode mode
= GET_MODE (x
);
267 enum machine_mode op_mode
;
270 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
271 to build a new expression substituting recursively. If we can't do
272 anything, return our input. */
277 switch (GET_RTX_CLASS (code
))
281 op_mode
= GET_MODE (op0
);
282 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
283 if (op0
== XEXP (x
, 0))
285 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
289 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
290 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
291 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
293 return simplify_gen_binary (code
, mode
, op0
, op1
);
296 case RTX_COMM_COMPARE
:
299 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
300 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
301 op1
= simplify_replace_rtx (op1
, old_rtx
, new_rtx
);
302 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
304 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
307 case RTX_BITFIELD_OPS
:
309 op_mode
= GET_MODE (op0
);
310 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
311 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
312 op2
= simplify_replace_rtx (XEXP (x
, 2), old_rtx
, new_rtx
);
313 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
315 if (op_mode
== VOIDmode
)
316 op_mode
= GET_MODE (op0
);
317 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
320 /* The only case we try to handle is a SUBREG. */
323 op0
= simplify_replace_rtx (SUBREG_REG (x
), old_rtx
, new_rtx
);
324 if (op0
== SUBREG_REG (x
))
326 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
327 GET_MODE (SUBREG_REG (x
)),
329 return op0
? op0
: x
;
336 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
337 if (op0
== XEXP (x
, 0))
339 return replace_equiv_address_nv (x
, op0
);
341 else if (code
== LO_SUM
)
343 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
344 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
346 /* (lo_sum (high x) x) -> x */
347 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
350 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
352 return gen_rtx_LO_SUM (mode
, op0
, op1
);
354 else if (code
== REG
)
356 if (rtx_equal_p (x
, old_rtx
))
367 /* Try to simplify a unary operation CODE whose output mode is to be
368 MODE with input operand OP whose mode was originally OP_MODE.
369 Return zero if no simplification can be made. */
371 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
372 rtx op
, enum machine_mode op_mode
)
376 if (GET_CODE (op
) == CONST
)
379 trueop
= avoid_constant_pool_reference (op
);
381 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
385 return simplify_unary_operation_1 (code
, mode
, op
);
388 /* Perform some simplifications we can do even if the operands
391 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
393 enum rtx_code reversed
;
399 /* (not (not X)) == X. */
400 if (GET_CODE (op
) == NOT
)
403 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
404 comparison is all ones. */
405 if (COMPARISON_P (op
)
406 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
407 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
408 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
409 XEXP (op
, 0), XEXP (op
, 1));
411 /* (not (plus X -1)) can become (neg X). */
412 if (GET_CODE (op
) == PLUS
413 && XEXP (op
, 1) == constm1_rtx
)
414 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
416 /* Similarly, (not (neg X)) is (plus X -1). */
417 if (GET_CODE (op
) == NEG
)
418 return plus_constant (XEXP (op
, 0), -1);
420 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
421 if (GET_CODE (op
) == XOR
422 && GET_CODE (XEXP (op
, 1)) == CONST_INT
423 && (temp
= simplify_unary_operation (NOT
, mode
,
424 XEXP (op
, 1), mode
)) != 0)
425 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
427 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
428 if (GET_CODE (op
) == PLUS
429 && GET_CODE (XEXP (op
, 1)) == CONST_INT
430 && mode_signbit_p (mode
, XEXP (op
, 1))
431 && (temp
= simplify_unary_operation (NOT
, mode
,
432 XEXP (op
, 1), mode
)) != 0)
433 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
436 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
437 operands other than 1, but that is not valid. We could do a
438 similar simplification for (not (lshiftrt C X)) where C is
439 just the sign bit, but this doesn't seem common enough to
441 if (GET_CODE (op
) == ASHIFT
442 && XEXP (op
, 0) == const1_rtx
)
444 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
445 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
448 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
449 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
450 so we can perform the above simplification. */
452 if (STORE_FLAG_VALUE
== -1
453 && GET_CODE (op
) == ASHIFTRT
454 && GET_CODE (XEXP (op
, 1)) == CONST_INT
455 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
456 return simplify_gen_relational (GE
, mode
, VOIDmode
,
457 XEXP (op
, 0), const0_rtx
);
460 if (GET_CODE (op
) == SUBREG
461 && subreg_lowpart_p (op
)
462 && (GET_MODE_SIZE (GET_MODE (op
))
463 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
464 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
465 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
467 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
470 x
= gen_rtx_ROTATE (inner_mode
,
471 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
473 XEXP (SUBREG_REG (op
), 1));
474 return rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
477 /* Apply De Morgan's laws to reduce number of patterns for machines
478 with negating logical insns (and-not, nand, etc.). If result has
479 only one NOT, put it first, since that is how the patterns are
482 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
484 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
485 enum machine_mode op_mode
;
487 op_mode
= GET_MODE (in1
);
488 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
490 op_mode
= GET_MODE (in2
);
491 if (op_mode
== VOIDmode
)
493 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
495 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
498 in2
= in1
; in1
= tem
;
501 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
507 /* (neg (neg X)) == X. */
508 if (GET_CODE (op
) == NEG
)
511 /* (neg (plus X 1)) can become (not X). */
512 if (GET_CODE (op
) == PLUS
513 && XEXP (op
, 1) == const1_rtx
)
514 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
516 /* Similarly, (neg (not X)) is (plus X 1). */
517 if (GET_CODE (op
) == NOT
)
518 return plus_constant (XEXP (op
, 0), 1);
520 /* (neg (minus X Y)) can become (minus Y X). This transformation
521 isn't safe for modes with signed zeros, since if X and Y are
522 both +0, (minus Y X) is the same as (minus X Y). If the
523 rounding mode is towards +infinity (or -infinity) then the two
524 expressions will be rounded differently. */
525 if (GET_CODE (op
) == MINUS
526 && !HONOR_SIGNED_ZEROS (mode
)
527 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
528 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
530 if (GET_CODE (op
) == PLUS
531 && !HONOR_SIGNED_ZEROS (mode
)
532 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
534 /* (neg (plus A C)) is simplified to (minus -C A). */
535 if (GET_CODE (XEXP (op
, 1)) == CONST_INT
536 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
538 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
540 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
543 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
544 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
545 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
548 /* (neg (mult A B)) becomes (mult (neg A) B).
549 This works even for floating-point values. */
550 if (GET_CODE (op
) == MULT
551 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
553 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
554 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
557 /* NEG commutes with ASHIFT since it is multiplication. Only do
558 this if we can then eliminate the NEG (e.g., if the operand
560 if (GET_CODE (op
) == ASHIFT
)
562 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
564 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
567 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
568 C is equal to the width of MODE minus 1. */
569 if (GET_CODE (op
) == ASHIFTRT
570 && GET_CODE (XEXP (op
, 1)) == CONST_INT
571 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
572 return simplify_gen_binary (LSHIFTRT
, mode
,
573 XEXP (op
, 0), XEXP (op
, 1));
575 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
576 C is equal to the width of MODE minus 1. */
577 if (GET_CODE (op
) == LSHIFTRT
578 && GET_CODE (XEXP (op
, 1)) == CONST_INT
579 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
580 return simplify_gen_binary (ASHIFTRT
, mode
,
581 XEXP (op
, 0), XEXP (op
, 1));
583 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
584 if (GET_CODE (op
) == XOR
585 && XEXP (op
, 1) == const1_rtx
586 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
587 return plus_constant (XEXP (op
, 0), -1);
589 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
590 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
591 if (GET_CODE (op
) == LT
592 && XEXP (op
, 1) == const0_rtx
)
594 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
595 int isize
= GET_MODE_BITSIZE (inner
);
596 if (STORE_FLAG_VALUE
== 1)
598 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
599 GEN_INT (isize
- 1));
602 if (GET_MODE_BITSIZE (mode
) > isize
)
603 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
604 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
606 else if (STORE_FLAG_VALUE
== -1)
608 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
609 GEN_INT (isize
- 1));
612 if (GET_MODE_BITSIZE (mode
) > isize
)
613 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
614 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
620 /* We can't handle truncation to a partial integer mode here
621 because we don't know the real bitsize of the partial
623 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
626 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
627 if ((GET_CODE (op
) == SIGN_EXTEND
628 || GET_CODE (op
) == ZERO_EXTEND
)
629 && GET_MODE (XEXP (op
, 0)) == mode
)
632 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
633 (OP:SI foo:SI) if OP is NEG or ABS. */
634 if ((GET_CODE (op
) == ABS
635 || GET_CODE (op
) == NEG
)
636 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
637 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
638 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
639 return simplify_gen_unary (GET_CODE (op
), mode
,
640 XEXP (XEXP (op
, 0), 0), mode
);
642 /* (truncate:A (subreg:B (truncate:C X) 0)) is
644 if (GET_CODE (op
) == SUBREG
645 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
646 && subreg_lowpart_p (op
))
647 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
648 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
650 /* If we know that the value is already truncated, we can
651 replace the TRUNCATE with a SUBREG. Note that this is also
652 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
653 modes we just have to apply a different definition for
654 truncation. But don't do this for an (LSHIFTRT (MULT ...))
655 since this will cause problems with the umulXi3_highpart
657 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
658 GET_MODE_BITSIZE (GET_MODE (op
)))
659 ? (num_sign_bit_copies (op
, GET_MODE (op
))
660 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op
))
661 - GET_MODE_BITSIZE (mode
)))
662 : truncated_to_mode (mode
, op
))
663 && ! (GET_CODE (op
) == LSHIFTRT
664 && GET_CODE (XEXP (op
, 0)) == MULT
))
665 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
667 /* A truncate of a comparison can be replaced with a subreg if
668 STORE_FLAG_VALUE permits. This is like the previous test,
669 but it works even if the comparison is done in a mode larger
670 than HOST_BITS_PER_WIDE_INT. */
671 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
673 && ((HOST_WIDE_INT
) STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
674 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
678 if (DECIMAL_FLOAT_MODE_P (mode
))
681 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
682 if (GET_CODE (op
) == FLOAT_EXTEND
683 && GET_MODE (XEXP (op
, 0)) == mode
)
686 /* (float_truncate:SF (float_truncate:DF foo:XF))
687 = (float_truncate:SF foo:XF).
688 This may eliminate double rounding, so it is unsafe.
690 (float_truncate:SF (float_extend:XF foo:DF))
691 = (float_truncate:SF foo:DF).
693 (float_truncate:DF (float_extend:XF foo:SF))
694 = (float_extend:SF foo:DF). */
695 if ((GET_CODE (op
) == FLOAT_TRUNCATE
696 && flag_unsafe_math_optimizations
)
697 || GET_CODE (op
) == FLOAT_EXTEND
)
698 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
700 > GET_MODE_SIZE (mode
)
701 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
705 /* (float_truncate (float x)) is (float x) */
706 if (GET_CODE (op
) == FLOAT
707 && (flag_unsafe_math_optimizations
708 || ((unsigned)significand_size (GET_MODE (op
))
709 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
710 - num_sign_bit_copies (XEXP (op
, 0),
711 GET_MODE (XEXP (op
, 0)))))))
712 return simplify_gen_unary (FLOAT
, mode
,
714 GET_MODE (XEXP (op
, 0)));
716 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
717 (OP:SF foo:SF) if OP is NEG or ABS. */
718 if ((GET_CODE (op
) == ABS
719 || GET_CODE (op
) == NEG
)
720 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
721 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
722 return simplify_gen_unary (GET_CODE (op
), mode
,
723 XEXP (XEXP (op
, 0), 0), mode
);
725 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
726 is (float_truncate:SF x). */
727 if (GET_CODE (op
) == SUBREG
728 && subreg_lowpart_p (op
)
729 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
730 return SUBREG_REG (op
);
734 if (DECIMAL_FLOAT_MODE_P (mode
))
737 /* (float_extend (float_extend x)) is (float_extend x)
739 (float_extend (float x)) is (float x) assuming that double
740 rounding can't happen.
742 if (GET_CODE (op
) == FLOAT_EXTEND
743 || (GET_CODE (op
) == FLOAT
744 && ((unsigned)significand_size (GET_MODE (op
))
745 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
746 - num_sign_bit_copies (XEXP (op
, 0),
747 GET_MODE (XEXP (op
, 0)))))))
748 return simplify_gen_unary (GET_CODE (op
), mode
,
750 GET_MODE (XEXP (op
, 0)));
755 /* (abs (neg <foo>)) -> (abs <foo>) */
756 if (GET_CODE (op
) == NEG
)
757 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
758 GET_MODE (XEXP (op
, 0)));
760 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
762 if (GET_MODE (op
) == VOIDmode
)
765 /* If operand is something known to be positive, ignore the ABS. */
766 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
767 || ((GET_MODE_BITSIZE (GET_MODE (op
))
768 <= HOST_BITS_PER_WIDE_INT
)
769 && ((nonzero_bits (op
, GET_MODE (op
))
771 << (GET_MODE_BITSIZE (GET_MODE (op
)) - 1)))
775 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
776 if (num_sign_bit_copies (op
, mode
) == GET_MODE_BITSIZE (mode
))
777 return gen_rtx_NEG (mode
, op
);
782 /* (ffs (*_extend <X>)) = (ffs <X>) */
783 if (GET_CODE (op
) == SIGN_EXTEND
784 || GET_CODE (op
) == ZERO_EXTEND
)
785 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
786 GET_MODE (XEXP (op
, 0)));
791 /* (pop* (zero_extend <X>)) = (pop* <X>) */
792 if (GET_CODE (op
) == ZERO_EXTEND
)
793 return simplify_gen_unary (code
, mode
, XEXP (op
, 0),
794 GET_MODE (XEXP (op
, 0)));
798 /* (float (sign_extend <X>)) = (float <X>). */
799 if (GET_CODE (op
) == SIGN_EXTEND
)
800 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
801 GET_MODE (XEXP (op
, 0)));
805 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
806 becomes just the MINUS if its mode is MODE. This allows
807 folding switch statements on machines using casesi (such as
809 if (GET_CODE (op
) == TRUNCATE
810 && GET_MODE (XEXP (op
, 0)) == mode
811 && GET_CODE (XEXP (op
, 0)) == MINUS
812 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
813 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
816 /* Check for a sign extension of a subreg of a promoted
817 variable, where the promotion is sign-extended, and the
818 target mode is the same as the variable's promotion. */
819 if (GET_CODE (op
) == SUBREG
820 && SUBREG_PROMOTED_VAR_P (op
)
821 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
822 && GET_MODE (XEXP (op
, 0)) == mode
)
825 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
826 if (! POINTERS_EXTEND_UNSIGNED
827 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
829 || (GET_CODE (op
) == SUBREG
830 && REG_P (SUBREG_REG (op
))
831 && REG_POINTER (SUBREG_REG (op
))
832 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
833 return convert_memory_address (Pmode
, op
);
838 /* Check for a zero extension of a subreg of a promoted
839 variable, where the promotion is zero-extended, and the
840 target mode is the same as the variable's promotion. */
841 if (GET_CODE (op
) == SUBREG
842 && SUBREG_PROMOTED_VAR_P (op
)
843 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
844 && GET_MODE (XEXP (op
, 0)) == mode
)
847 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
848 if (POINTERS_EXTEND_UNSIGNED
> 0
849 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
851 || (GET_CODE (op
) == SUBREG
852 && REG_P (SUBREG_REG (op
))
853 && REG_POINTER (SUBREG_REG (op
))
854 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
855 return convert_memory_address (Pmode
, op
);
866 /* Try to compute the value of a unary operation CODE whose output mode is to
867 be MODE with input operand OP whose mode was originally OP_MODE.
868 Return zero if the value cannot be computed. */
870 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
871 rtx op
, enum machine_mode op_mode
)
873 unsigned int width
= GET_MODE_BITSIZE (mode
);
875 if (code
== VEC_DUPLICATE
)
877 gcc_assert (VECTOR_MODE_P (mode
));
878 if (GET_MODE (op
) != VOIDmode
)
880 if (!VECTOR_MODE_P (GET_MODE (op
)))
881 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
883 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
886 if (GET_CODE (op
) == CONST_INT
|| GET_CODE (op
) == CONST_DOUBLE
887 || GET_CODE (op
) == CONST_VECTOR
)
889 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
890 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
891 rtvec v
= rtvec_alloc (n_elts
);
894 if (GET_CODE (op
) != CONST_VECTOR
)
895 for (i
= 0; i
< n_elts
; i
++)
896 RTVEC_ELT (v
, i
) = op
;
899 enum machine_mode inmode
= GET_MODE (op
);
900 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
901 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
903 gcc_assert (in_n_elts
< n_elts
);
904 gcc_assert ((n_elts
% in_n_elts
) == 0);
905 for (i
= 0; i
< n_elts
; i
++)
906 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
908 return gen_rtx_CONST_VECTOR (mode
, v
);
912 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
914 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
915 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
916 enum machine_mode opmode
= GET_MODE (op
);
917 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
918 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
919 rtvec v
= rtvec_alloc (n_elts
);
922 gcc_assert (op_n_elts
== n_elts
);
923 for (i
= 0; i
< n_elts
; i
++)
925 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
926 CONST_VECTOR_ELT (op
, i
),
927 GET_MODE_INNER (opmode
));
930 RTVEC_ELT (v
, i
) = x
;
932 return gen_rtx_CONST_VECTOR (mode
, v
);
935 /* The order of these tests is critical so that, for example, we don't
936 check the wrong mode (input vs. output) for a conversion operation,
937 such as FIX. At some point, this should be simplified. */
939 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
940 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
942 HOST_WIDE_INT hv
, lv
;
945 if (GET_CODE (op
) == CONST_INT
)
946 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
948 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
950 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
951 d
= real_value_truncate (mode
, d
);
952 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
954 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
955 && (GET_CODE (op
) == CONST_DOUBLE
956 || GET_CODE (op
) == CONST_INT
))
958 HOST_WIDE_INT hv
, lv
;
961 if (GET_CODE (op
) == CONST_INT
)
962 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
964 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
966 if (op_mode
== VOIDmode
)
968 /* We don't know how to interpret negative-looking numbers in
969 this case, so don't try to fold those. */
973 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
976 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
978 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
979 d
= real_value_truncate (mode
, d
);
980 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
983 if (GET_CODE (op
) == CONST_INT
984 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
986 HOST_WIDE_INT arg0
= INTVAL (op
);
1000 val
= (arg0
>= 0 ? arg0
: - arg0
);
1004 /* Don't use ffs here. Instead, get low order bit and then its
1005 number. If arg0 is zero, this will return 0, as desired. */
1006 arg0
&= GET_MODE_MASK (mode
);
1007 val
= exact_log2 (arg0
& (- arg0
)) + 1;
1011 arg0
&= GET_MODE_MASK (mode
);
1012 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1015 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
1019 arg0
&= GET_MODE_MASK (mode
);
1022 /* Even if the value at zero is undefined, we have to come
1023 up with some replacement. Seems good enough. */
1024 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1025 val
= GET_MODE_BITSIZE (mode
);
1028 val
= exact_log2 (arg0
& -arg0
);
1032 arg0
&= GET_MODE_MASK (mode
);
1035 val
++, arg0
&= arg0
- 1;
1039 arg0
&= GET_MODE_MASK (mode
);
1042 val
++, arg0
&= arg0
- 1;
1051 /* When zero-extending a CONST_INT, we need to know its
1053 gcc_assert (op_mode
!= VOIDmode
);
1054 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1056 /* If we were really extending the mode,
1057 we would have to distinguish between zero-extension
1058 and sign-extension. */
1059 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1062 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1063 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1069 if (op_mode
== VOIDmode
)
1071 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1073 /* If we were really extending the mode,
1074 we would have to distinguish between zero-extension
1075 and sign-extension. */
1076 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1079 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1082 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1084 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
1085 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1093 case FLOAT_TRUNCATE
:
1103 return gen_int_mode (val
, mode
);
1106 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1107 for a DImode operation on a CONST_INT. */
1108 else if (GET_MODE (op
) == VOIDmode
1109 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1110 && (GET_CODE (op
) == CONST_DOUBLE
1111 || GET_CODE (op
) == CONST_INT
))
1113 unsigned HOST_WIDE_INT l1
, lv
;
1114 HOST_WIDE_INT h1
, hv
;
1116 if (GET_CODE (op
) == CONST_DOUBLE
)
1117 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1119 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1129 neg_double (l1
, h1
, &lv
, &hv
);
1134 neg_double (l1
, h1
, &lv
, &hv
);
1146 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
1149 lv
= exact_log2 (l1
& -l1
) + 1;
1155 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
1156 - HOST_BITS_PER_WIDE_INT
;
1158 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
1159 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1160 lv
= GET_MODE_BITSIZE (mode
);
1166 lv
= exact_log2 (l1
& -l1
);
1168 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
1169 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1170 lv
= GET_MODE_BITSIZE (mode
);
1193 /* This is just a change-of-mode, so do nothing. */
1198 gcc_assert (op_mode
!= VOIDmode
);
1200 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1204 lv
= l1
& GET_MODE_MASK (op_mode
);
1208 if (op_mode
== VOIDmode
1209 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1213 lv
= l1
& GET_MODE_MASK (op_mode
);
1214 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
1215 && (lv
& ((HOST_WIDE_INT
) 1
1216 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
1217 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1219 hv
= HWI_SIGN_EXTEND (lv
);
1230 return immed_double_const (lv
, hv
, mode
);
1233 else if (GET_CODE (op
) == CONST_DOUBLE
1234 && SCALAR_FLOAT_MODE_P (mode
))
1236 REAL_VALUE_TYPE d
, t
;
1237 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1242 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1244 real_sqrt (&t
, mode
, &d
);
1248 d
= REAL_VALUE_ABS (d
);
1251 d
= REAL_VALUE_NEGATE (d
);
1253 case FLOAT_TRUNCATE
:
1254 d
= real_value_truncate (mode
, d
);
1257 /* All this does is change the mode. */
1260 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1267 real_to_target (tmp
, &d
, GET_MODE (op
));
1268 for (i
= 0; i
< 4; i
++)
1270 real_from_target (&d
, tmp
, mode
);
1276 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1279 else if (GET_CODE (op
) == CONST_DOUBLE
1280 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1281 && GET_MODE_CLASS (mode
) == MODE_INT
1282 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1284 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1285 operators are intentionally left unspecified (to ease implementation
1286 by target backends), for consistency, this routine implements the
1287 same semantics for constant folding as used by the middle-end. */
1289 /* This was formerly used only for non-IEEE float.
1290 eggert@twinsun.com says it is safe for IEEE also. */
1291 HOST_WIDE_INT xh
, xl
, th
, tl
;
1292 REAL_VALUE_TYPE x
, t
;
1293 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1297 if (REAL_VALUE_ISNAN (x
))
1300 /* Test against the signed upper bound. */
1301 if (width
> HOST_BITS_PER_WIDE_INT
)
1303 th
= ((unsigned HOST_WIDE_INT
) 1
1304 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1310 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1312 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1313 if (REAL_VALUES_LESS (t
, x
))
1320 /* Test against the signed lower bound. */
1321 if (width
> HOST_BITS_PER_WIDE_INT
)
1323 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1329 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1331 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1332 if (REAL_VALUES_LESS (x
, t
))
1338 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1342 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1345 /* Test against the unsigned upper bound. */
1346 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1351 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1353 th
= ((unsigned HOST_WIDE_INT
) 1
1354 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1360 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1362 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1363 if (REAL_VALUES_LESS (t
, x
))
1370 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1376 return immed_double_const (xl
, xh
, mode
);
1382 /* Subroutine of simplify_binary_operation to simplify a commutative,
1383 associative binary operation CODE with result mode MODE, operating
1384 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1385 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1386 canonicalization is possible. */
1389 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1394 /* Linearize the operator to the left. */
1395 if (GET_CODE (op1
) == code
)
1397 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1398 if (GET_CODE (op0
) == code
)
1400 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1401 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1404 /* "a op (b op c)" becomes "(b op c) op a". */
1405 if (! swap_commutative_operands_p (op1
, op0
))
1406 return simplify_gen_binary (code
, mode
, op1
, op0
);
1413 if (GET_CODE (op0
) == code
)
1415 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1416 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
1418 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1419 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1422 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1423 tem
= swap_commutative_operands_p (XEXP (op0
, 1), op1
)
1424 ? simplify_binary_operation (code
, mode
, op1
, XEXP (op0
, 1))
1425 : simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1427 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1429 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1430 tem
= swap_commutative_operands_p (XEXP (op0
, 0), op1
)
1431 ? simplify_binary_operation (code
, mode
, op1
, XEXP (op0
, 0))
1432 : simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
1434 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1441 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1442 and OP1. Return 0 if no simplification is possible.
1444 Don't use this for relational operations such as EQ or LT.
1445 Use simplify_relational_operation instead. */
1447 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1450 rtx trueop0
, trueop1
;
1453 /* Relational operations don't work here. We must know the mode
1454 of the operands in order to do the comparison correctly.
1455 Assuming a full word can give incorrect results.
1456 Consider comparing 128 with -128 in QImode. */
1457 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
1458 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
1460 /* Make sure the constant is second. */
1461 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
1462 && swap_commutative_operands_p (op0
, op1
))
1464 tem
= op0
, op0
= op1
, op1
= tem
;
1467 trueop0
= avoid_constant_pool_reference (op0
);
1468 trueop1
= avoid_constant_pool_reference (op1
);
1470 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
1473 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
1476 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1477 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1478 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1479 actual constants. */
1482 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
1483 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1485 rtx tem
, reversed
, opleft
, opright
;
1487 unsigned int width
= GET_MODE_BITSIZE (mode
);
1489 /* Even if we can't compute a constant result,
1490 there are some cases worth simplifying. */
1495 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1496 when x is NaN, infinite, or finite and nonzero. They aren't
1497 when x is -0 and the rounding mode is not towards -infinity,
1498 since (-0) + 0 is then 0. */
1499 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1502 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1503 transformations are safe even for IEEE. */
1504 if (GET_CODE (op0
) == NEG
)
1505 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1506 else if (GET_CODE (op1
) == NEG
)
1507 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1509 /* (~a) + 1 -> -a */
1510 if (INTEGRAL_MODE_P (mode
)
1511 && GET_CODE (op0
) == NOT
1512 && trueop1
== const1_rtx
)
1513 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1515 /* Handle both-operands-constant cases. We can only add
1516 CONST_INTs to constants since the sum of relocatable symbols
1517 can't be handled by most assemblers. Don't add CONST_INT
1518 to CONST_INT since overflow won't be computed properly if wider
1519 than HOST_BITS_PER_WIDE_INT. */
1521 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1522 && GET_CODE (op1
) == CONST_INT
)
1523 return plus_constant (op0
, INTVAL (op1
));
1524 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1525 && GET_CODE (op0
) == CONST_INT
)
1526 return plus_constant (op1
, INTVAL (op0
));
1528 /* See if this is something like X * C - X or vice versa or
1529 if the multiplication is written as a shift. If so, we can
1530 distribute and make a new multiply, shift, or maybe just
1531 have X (if C is 2 in the example above). But don't make
1532 something more expensive than we had before. */
1534 if (SCALAR_INT_MODE_P (mode
))
1536 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1537 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1538 rtx lhs
= op0
, rhs
= op1
;
1540 if (GET_CODE (lhs
) == NEG
)
1544 lhs
= XEXP (lhs
, 0);
1546 else if (GET_CODE (lhs
) == MULT
1547 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1549 coeff0l
= INTVAL (XEXP (lhs
, 1));
1550 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1551 lhs
= XEXP (lhs
, 0);
1553 else if (GET_CODE (lhs
) == ASHIFT
1554 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1555 && INTVAL (XEXP (lhs
, 1)) >= 0
1556 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1558 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1560 lhs
= XEXP (lhs
, 0);
1563 if (GET_CODE (rhs
) == NEG
)
1567 rhs
= XEXP (rhs
, 0);
1569 else if (GET_CODE (rhs
) == MULT
1570 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1572 coeff1l
= INTVAL (XEXP (rhs
, 1));
1573 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1574 rhs
= XEXP (rhs
, 0);
1576 else if (GET_CODE (rhs
) == ASHIFT
1577 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1578 && INTVAL (XEXP (rhs
, 1)) >= 0
1579 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1581 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1583 rhs
= XEXP (rhs
, 0);
1586 if (rtx_equal_p (lhs
, rhs
))
1588 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1590 unsigned HOST_WIDE_INT l
;
1593 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1594 coeff
= immed_double_const (l
, h
, mode
);
1596 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1597 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1602 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1603 if ((GET_CODE (op1
) == CONST_INT
1604 || GET_CODE (op1
) == CONST_DOUBLE
)
1605 && GET_CODE (op0
) == XOR
1606 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1607 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1608 && mode_signbit_p (mode
, op1
))
1609 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1610 simplify_gen_binary (XOR
, mode
, op1
,
1613 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1614 if (GET_CODE (op0
) == MULT
1615 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1619 in1
= XEXP (XEXP (op0
, 0), 0);
1620 in2
= XEXP (op0
, 1);
1621 return simplify_gen_binary (MINUS
, mode
, op1
,
1622 simplify_gen_binary (MULT
, mode
,
1626 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1627 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1629 if (COMPARISON_P (op0
)
1630 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1631 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1632 && (reversed
= reversed_comparison (op0
, mode
)))
1634 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1636 /* If one of the operands is a PLUS or a MINUS, see if we can
1637 simplify this by the associative law.
1638 Don't use the associative law for floating point.
1639 The inaccuracy makes it nonassociative,
1640 and subtle programs can break if operations are associated. */
1642 if (INTEGRAL_MODE_P (mode
)
1643 && (plus_minus_operand_p (op0
)
1644 || plus_minus_operand_p (op1
))
1645 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1648 /* Reassociate floating point addition only when the user
1649 specifies unsafe math optimizations. */
1650 if (FLOAT_MODE_P (mode
)
1651 && flag_unsafe_math_optimizations
)
1653 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1661 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1662 using cc0, in which case we want to leave it as a COMPARE
1663 so we can distinguish it from a register-register-copy.
1665 In IEEE floating point, x-0 is not the same as x. */
1667 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1668 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1669 && trueop1
== CONST0_RTX (mode
))
1673 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1674 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1675 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1676 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1678 rtx xop00
= XEXP (op0
, 0);
1679 rtx xop10
= XEXP (op1
, 0);
1682 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1684 if (REG_P (xop00
) && REG_P (xop10
)
1685 && GET_MODE (xop00
) == GET_MODE (xop10
)
1686 && REGNO (xop00
) == REGNO (xop10
)
1687 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1688 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1695 /* We can't assume x-x is 0 even with non-IEEE floating point,
1696 but since it is zero except in very strange circumstances, we
1697 will treat it as zero with -funsafe-math-optimizations. */
1698 if (rtx_equal_p (trueop0
, trueop1
)
1699 && ! side_effects_p (op0
)
1700 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1701 return CONST0_RTX (mode
);
1703 /* Change subtraction from zero into negation. (0 - x) is the
1704 same as -x when x is NaN, infinite, or finite and nonzero.
1705 But if the mode has signed zeros, and does not round towards
1706 -infinity, then 0 - 0 is 0, not -0. */
1707 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1708 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1710 /* (-1 - a) is ~a. */
1711 if (trueop0
== constm1_rtx
)
1712 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1714 /* Subtracting 0 has no effect unless the mode has signed zeros
1715 and supports rounding towards -infinity. In such a case,
1717 if (!(HONOR_SIGNED_ZEROS (mode
)
1718 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1719 && trueop1
== CONST0_RTX (mode
))
1722 /* See if this is something like X * C - X or vice versa or
1723 if the multiplication is written as a shift. If so, we can
1724 distribute and make a new multiply, shift, or maybe just
1725 have X (if C is 2 in the example above). But don't make
1726 something more expensive than we had before. */
1728 if (SCALAR_INT_MODE_P (mode
))
1730 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1731 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1732 rtx lhs
= op0
, rhs
= op1
;
1734 if (GET_CODE (lhs
) == NEG
)
1738 lhs
= XEXP (lhs
, 0);
1740 else if (GET_CODE (lhs
) == MULT
1741 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1743 coeff0l
= INTVAL (XEXP (lhs
, 1));
1744 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1745 lhs
= XEXP (lhs
, 0);
1747 else if (GET_CODE (lhs
) == ASHIFT
1748 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1749 && INTVAL (XEXP (lhs
, 1)) >= 0
1750 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1752 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1754 lhs
= XEXP (lhs
, 0);
1757 if (GET_CODE (rhs
) == NEG
)
1761 rhs
= XEXP (rhs
, 0);
1763 else if (GET_CODE (rhs
) == MULT
1764 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1766 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1767 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1768 rhs
= XEXP (rhs
, 0);
1770 else if (GET_CODE (rhs
) == ASHIFT
1771 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1772 && INTVAL (XEXP (rhs
, 1)) >= 0
1773 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1775 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1777 rhs
= XEXP (rhs
, 0);
1780 if (rtx_equal_p (lhs
, rhs
))
1782 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1784 unsigned HOST_WIDE_INT l
;
1787 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1788 coeff
= immed_double_const (l
, h
, mode
);
1790 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1791 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1796 /* (a - (-b)) -> (a + b). True even for IEEE. */
1797 if (GET_CODE (op1
) == NEG
)
1798 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1800 /* (-x - c) may be simplified as (-c - x). */
1801 if (GET_CODE (op0
) == NEG
1802 && (GET_CODE (op1
) == CONST_INT
1803 || GET_CODE (op1
) == CONST_DOUBLE
))
1805 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1807 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1810 /* Don't let a relocatable value get a negative coeff. */
1811 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1812 return simplify_gen_binary (PLUS
, mode
,
1814 neg_const_int (mode
, op1
));
1816 /* (x - (x & y)) -> (x & ~y) */
1817 if (GET_CODE (op1
) == AND
)
1819 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1821 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1822 GET_MODE (XEXP (op1
, 1)));
1823 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1825 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1827 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1828 GET_MODE (XEXP (op1
, 0)));
1829 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1833 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1834 by reversing the comparison code if valid. */
1835 if (STORE_FLAG_VALUE
== 1
1836 && trueop0
== const1_rtx
1837 && COMPARISON_P (op1
)
1838 && (reversed
= reversed_comparison (op1
, mode
)))
1841 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1842 if (GET_CODE (op1
) == MULT
1843 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1847 in1
= XEXP (XEXP (op1
, 0), 0);
1848 in2
= XEXP (op1
, 1);
1849 return simplify_gen_binary (PLUS
, mode
,
1850 simplify_gen_binary (MULT
, mode
,
1855 /* Canonicalize (minus (neg A) (mult B C)) to
1856 (minus (mult (neg B) C) A). */
1857 if (GET_CODE (op1
) == MULT
1858 && GET_CODE (op0
) == NEG
)
1862 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1863 in2
= XEXP (op1
, 1);
1864 return simplify_gen_binary (MINUS
, mode
,
1865 simplify_gen_binary (MULT
, mode
,
1870 /* If one of the operands is a PLUS or a MINUS, see if we can
1871 simplify this by the associative law. This will, for example,
1872 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1873 Don't use the associative law for floating point.
1874 The inaccuracy makes it nonassociative,
1875 and subtle programs can break if operations are associated. */
1877 if (INTEGRAL_MODE_P (mode
)
1878 && (plus_minus_operand_p (op0
)
1879 || plus_minus_operand_p (op1
))
1880 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1885 if (trueop1
== constm1_rtx
)
1886 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1888 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1889 x is NaN, since x * 0 is then also NaN. Nor is it valid
1890 when the mode has signed zeros, since multiplying a negative
1891 number by 0 will give -0, not 0. */
1892 if (!HONOR_NANS (mode
)
1893 && !HONOR_SIGNED_ZEROS (mode
)
1894 && trueop1
== CONST0_RTX (mode
)
1895 && ! side_effects_p (op0
))
1898 /* In IEEE floating point, x*1 is not equivalent to x for
1900 if (!HONOR_SNANS (mode
)
1901 && trueop1
== CONST1_RTX (mode
))
1904 /* Convert multiply by constant power of two into shift unless
1905 we are still generating RTL. This test is a kludge. */
1906 if (GET_CODE (trueop1
) == CONST_INT
1907 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1908 /* If the mode is larger than the host word size, and the
1909 uppermost bit is set, then this isn't a power of two due
1910 to implicit sign extension. */
1911 && (width
<= HOST_BITS_PER_WIDE_INT
1912 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1913 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1915 /* Likewise for multipliers wider than a word. */
1916 if (GET_CODE (trueop1
) == CONST_DOUBLE
1917 && (GET_MODE (trueop1
) == VOIDmode
1918 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1919 && GET_MODE (op0
) == mode
1920 && CONST_DOUBLE_LOW (trueop1
) == 0
1921 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
1922 return simplify_gen_binary (ASHIFT
, mode
, op0
,
1923 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
1925 /* x*2 is x+x and x*(-1) is -x */
1926 if (GET_CODE (trueop1
) == CONST_DOUBLE
1927 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
1928 && GET_MODE (op0
) == mode
)
1931 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1933 if (REAL_VALUES_EQUAL (d
, dconst2
))
1934 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1936 if (!HONOR_SNANS (mode
)
1937 && REAL_VALUES_EQUAL (d
, dconstm1
))
1938 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1941 /* Optimize -x * -x as x * x. */
1942 if (FLOAT_MODE_P (mode
)
1943 && GET_CODE (op0
) == NEG
1944 && GET_CODE (op1
) == NEG
1945 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
1946 && !side_effects_p (XEXP (op0
, 0)))
1947 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1949 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1950 if (SCALAR_FLOAT_MODE_P (mode
)
1951 && GET_CODE (op0
) == ABS
1952 && GET_CODE (op1
) == ABS
1953 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
1954 && !side_effects_p (XEXP (op0
, 0)))
1955 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1957 /* Reassociate multiplication, but for floating point MULTs
1958 only when the user specifies unsafe math optimizations. */
1959 if (! FLOAT_MODE_P (mode
)
1960 || flag_unsafe_math_optimizations
)
1962 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1969 if (trueop1
== const0_rtx
)
1971 if (GET_CODE (trueop1
) == CONST_INT
1972 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1973 == GET_MODE_MASK (mode
)))
1975 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1977 /* A | (~A) -> -1 */
1978 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1979 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1980 && ! side_effects_p (op0
)
1981 && SCALAR_INT_MODE_P (mode
))
1984 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1985 if (GET_CODE (op1
) == CONST_INT
1986 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1987 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
1990 /* Convert (A & B) | A to A. */
1991 if (GET_CODE (op0
) == AND
1992 && (rtx_equal_p (XEXP (op0
, 0), op1
)
1993 || rtx_equal_p (XEXP (op0
, 1), op1
))
1994 && ! side_effects_p (XEXP (op0
, 0))
1995 && ! side_effects_p (XEXP (op0
, 1)))
1998 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1999 mode size to (rotate A CX). */
2001 if (GET_CODE (op1
) == ASHIFT
2002 || GET_CODE (op1
) == SUBREG
)
2013 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2014 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2015 && GET_CODE (XEXP (opleft
, 1)) == CONST_INT
2016 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2017 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2018 == GET_MODE_BITSIZE (mode
)))
2019 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2021 /* Same, but for ashift that has been "simplified" to a wider mode
2022 by simplify_shift_const. */
2024 if (GET_CODE (opleft
) == SUBREG
2025 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2026 && GET_CODE (opright
) == LSHIFTRT
2027 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2028 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2029 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2030 && (GET_MODE_SIZE (GET_MODE (opleft
))
2031 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2032 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2033 SUBREG_REG (XEXP (opright
, 0)))
2034 && GET_CODE (XEXP (SUBREG_REG (opleft
), 1)) == CONST_INT
2035 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2036 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2037 == GET_MODE_BITSIZE (mode
)))
2038 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2039 XEXP (SUBREG_REG (opleft
), 1));
2041 /* If we have (ior (and (X C1) C2)), simplify this by making
2042 C1 as small as possible if C1 actually changes. */
2043 if (GET_CODE (op1
) == CONST_INT
2044 && (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2045 || INTVAL (op1
) > 0)
2046 && GET_CODE (op0
) == AND
2047 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2048 && GET_CODE (op1
) == CONST_INT
2049 && (INTVAL (XEXP (op0
, 1)) & INTVAL (op1
)) != 0)
2050 return simplify_gen_binary (IOR
, mode
,
2052 (AND
, mode
, XEXP (op0
, 0),
2053 GEN_INT (INTVAL (XEXP (op0
, 1))
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
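      /* For example, in QImode (xor X (const_int -128)) becomes
         (plus X (const_int -128)): flipping the sign bit is the same as
         adding it, since any carry falls outside the mode.  */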
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
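      /* For example, (xor (ashift A (const_int 8)) (const_int 255)) has
         no nonzero bits common to its two operands, so it is rewritten
         as (ior (ashift A (const_int 8)) (const_int 255)).  */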
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */
      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0.  */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
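      /* For example, if X has QImode, (and:SI (sign_extend:SI X)
         (const_int 0x7f)) becomes (zero_extend:SI (and:QI X
         (const_int 0x7f))), since 0x7f has no bits outside QImode.  */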
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
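      /* For example, (udiv X (const_int 8)) becomes
         (lshiftrt X (const_int 3)), since exact_log2 (8) is 3.  */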
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;

          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
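      /* For example, (umod X (const_int 8)) becomes (and X (const_int 7)).  */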
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
      break;

    case ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      break;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && GET_CODE (trueop1) == CONST_INT
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_BITSIZE (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      break;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
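      /* In the four cases above, the minimum or maximum against the most
         extreme representable constant of the mode is that constant, and
         the minimum or maximum of equal operands is either operand.  */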
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (GET_CODE (x) == CONST_INT);
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
2576 if (XVECLEN (trueop1
, 0) == 1
2577 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2578 && GET_CODE (trueop0
) == VEC_CONCAT
)
2581 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2583 /* Try to find the element in the VEC_CONCAT. */
2584 while (GET_MODE (vec
) != mode
2585 && GET_CODE (vec
) == VEC_CONCAT
)
2587 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2588 if (offset
< vec_size
)
2589 vec
= XEXP (vec
, 0);
2593 vec
= XEXP (vec
, 1);
2595 vec
= avoid_constant_pool_reference (vec
);
2598 if (GET_MODE (vec
) == mode
)
2605 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2606 ? GET_MODE (trueop0
)
2607 : GET_MODE_INNER (mode
));
2608 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2609 ? GET_MODE (trueop1
)
2610 : GET_MODE_INNER (mode
));
2612 gcc_assert (VECTOR_MODE_P (mode
));
2613 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2614 == GET_MODE_SIZE (mode
));
2616 if (VECTOR_MODE_P (op0_mode
))
2617 gcc_assert (GET_MODE_INNER (mode
)
2618 == GET_MODE_INNER (op0_mode
));
2620 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2622 if (VECTOR_MODE_P (op1_mode
))
2623 gcc_assert (GET_MODE_INNER (mode
)
2624 == GET_MODE_INNER (op1_mode
));
2626 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2628 if ((GET_CODE (trueop0
) == CONST_VECTOR
2629 || GET_CODE (trueop0
) == CONST_INT
2630 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2631 && (GET_CODE (trueop1
) == CONST_VECTOR
2632 || GET_CODE (trueop1
) == CONST_INT
2633 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2635 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2636 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2637 rtvec v
= rtvec_alloc (n_elts
);
2639 unsigned in_n_elts
= 1;
2641 if (VECTOR_MODE_P (op0_mode
))
2642 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2643 for (i
= 0; i
< n_elts
; i
++)
2647 if (!VECTOR_MODE_P (op0_mode
))
2648 RTVEC_ELT (v
, i
) = trueop0
;
2650 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2654 if (!VECTOR_MODE_P (op1_mode
))
2655 RTVEC_ELT (v
, i
) = trueop1
;
2657 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2662 return gen_rtx_CONST_VECTOR (mode
, v
);
2675 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2678 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2680 unsigned int width
= GET_MODE_BITSIZE (mode
);
2682 if (VECTOR_MODE_P (mode
)
2683 && code
!= VEC_CONCAT
2684 && GET_CODE (op0
) == CONST_VECTOR
2685 && GET_CODE (op1
) == CONST_VECTOR
)
2687 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2688 enum machine_mode op0mode
= GET_MODE (op0
);
2689 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2690 enum machine_mode op1mode
= GET_MODE (op1
);
2691 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2692 rtvec v
= rtvec_alloc (n_elts
);
2695 gcc_assert (op0_n_elts
== n_elts
);
2696 gcc_assert (op1_n_elts
== n_elts
);
2697 for (i
= 0; i
< n_elts
; i
++)
2699 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2700 CONST_VECTOR_ELT (op0
, i
),
2701 CONST_VECTOR_ELT (op1
, i
));
2704 RTVEC_ELT (v
, i
) = x
;
2707 return gen_rtx_CONST_VECTOR (mode
, v
);
2710 if (VECTOR_MODE_P (mode
)
2711 && code
== VEC_CONCAT
2712 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2714 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2715 rtvec v
= rtvec_alloc (n_elts
);
2717 gcc_assert (n_elts
>= 2);
2720 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2721 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2723 RTVEC_ELT (v
, 0) = op0
;
2724 RTVEC_ELT (v
, 1) = op1
;
2728 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2729 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2732 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2733 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2734 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2736 for (i
= 0; i
< op0_n_elts
; ++i
)
2737 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2738 for (i
= 0; i
< op1_n_elts
; ++i
)
2739 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2742 return gen_rtx_CONST_VECTOR (mode
, v
);
2745 if (SCALAR_FLOAT_MODE_P (mode
)
2746 && GET_CODE (op0
) == CONST_DOUBLE
2747 && GET_CODE (op1
) == CONST_DOUBLE
2748 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2759 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2761 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2763 for (i
= 0; i
< 4; i
++)
2780 real_from_target (&r
, tmp0
, mode
);
2781 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2785 REAL_VALUE_TYPE f0
, f1
, value
, result
;
2788 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
2789 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
2790 real_convert (&f0
, mode
, &f0
);
2791 real_convert (&f1
, mode
, &f1
);
2793 if (HONOR_SNANS (mode
)
2794 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
2798 && REAL_VALUES_EQUAL (f1
, dconst0
)
2799 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
2802 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2803 && flag_trapping_math
2804 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
2806 int s0
= REAL_VALUE_NEGATIVE (f0
);
2807 int s1
= REAL_VALUE_NEGATIVE (f1
);
2812 /* Inf + -Inf = NaN plus exception. */
2817 /* Inf - Inf = NaN plus exception. */
2822 /* Inf / Inf = NaN plus exception. */
2829 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2830 && flag_trapping_math
2831 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
2832 || (REAL_VALUE_ISINF (f1
)
2833 && REAL_VALUES_EQUAL (f0
, dconst0
))))
2834 /* Inf * 0 = NaN plus exception. */
2837 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
2839 real_convert (&result
, mode
, &value
);
2841 /* Don't constant fold this floating point operation if
2842 the result has overflowed and flag_trapping_math. */
2844 if (flag_trapping_math
2845 && MODE_HAS_INFINITIES (mode
)
2846 && REAL_VALUE_ISINF (result
)
2847 && !REAL_VALUE_ISINF (f0
)
2848 && !REAL_VALUE_ISINF (f1
))
2849 /* Overflow plus exception. */
2852 /* Don't constant fold this floating point operation if the
2853 result may dependent upon the run-time rounding mode and
2854 flag_rounding_math is set, or if GCC's software emulation
2855 is unable to accurately represent the result. */
2857 if ((flag_rounding_math
2858 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
2859 && !flag_unsafe_math_optimizations
))
2860 && (inexact
|| !real_identical (&result
, &value
)))
2863 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
2867 /* We can fold some multi-word operations. */
2868 if (GET_MODE_CLASS (mode
) == MODE_INT
2869 && width
== HOST_BITS_PER_WIDE_INT
* 2
2870 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2871 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2873 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2874 HOST_WIDE_INT h1
, h2
, hv
, ht
;
2876 if (GET_CODE (op0
) == CONST_DOUBLE
)
2877 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
2879 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
2881 if (GET_CODE (op1
) == CONST_DOUBLE
)
2882 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
2884 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
2889 /* A - B == A + (-B). */
2890 neg_double (l2
, h2
, &lv
, &hv
);
2893 /* Fall through.... */
2896 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2900 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2904 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2905 &lv
, &hv
, <
, &ht
))
2910 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2911 <
, &ht
, &lv
, &hv
))
2916 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2917 &lv
, &hv
, <
, &ht
))
2922 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2923 <
, &ht
, &lv
, &hv
))
2928 lv
= l1
& l2
, hv
= h1
& h2
;
2932 lv
= l1
| l2
, hv
= h1
| h2
;
2936 lv
= l1
^ l2
, hv
= h1
^ h2
;
2942 && ((unsigned HOST_WIDE_INT
) l1
2943 < (unsigned HOST_WIDE_INT
) l2
)))
2952 && ((unsigned HOST_WIDE_INT
) l1
2953 > (unsigned HOST_WIDE_INT
) l2
)))
2960 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
2962 && ((unsigned HOST_WIDE_INT
) l1
2963 < (unsigned HOST_WIDE_INT
) l2
)))
2970 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
2972 && ((unsigned HOST_WIDE_INT
) l1
2973 > (unsigned HOST_WIDE_INT
) l2
)))
2979 case LSHIFTRT
: case ASHIFTRT
:
2981 case ROTATE
: case ROTATERT
:
2982 if (SHIFT_COUNT_TRUNCATED
)
2983 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
2985 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
2988 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
2989 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
2991 else if (code
== ASHIFT
)
2992 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
2993 else if (code
== ROTATE
)
2994 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2995 else /* code == ROTATERT */
2996 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3003 return immed_double_const (lv
, hv
, mode
);
3006 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3007 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3009 /* Get the integer argument values in two forms:
3010 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3012 arg0
= INTVAL (op0
);
3013 arg1
= INTVAL (op1
);
3015 if (width
< HOST_BITS_PER_WIDE_INT
)
3017 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3018 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3021 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3022 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3025 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3026 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3034 /* Compute the value of the arithmetic. */
3039 val
= arg0s
+ arg1s
;
3043 val
= arg0s
- arg1s
;
3047 val
= arg0s
* arg1s
;
3052 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3055 val
= arg0s
/ arg1s
;
3060 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3063 val
= arg0s
% arg1s
;
3068 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3071 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3076 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3079 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3097 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3098 the value is in range. We can't return any old value for
3099 out-of-range arguments because either the middle-end (via
3100 shift_truncation_mask) or the back-end might be relying on
3101 target-specific knowledge. Nor can we rely on
3102 shift_truncation_mask, since the shift might not be part of an
3103 ashlM3, lshrM3 or ashrM3 instruction. */
3104 if (SHIFT_COUNT_TRUNCATED
)
3105 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3106 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3109 val
= (code
== ASHIFT
3110 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3111 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3113 /* Sign-extend the result for arithmetic right shifts. */
3114 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3115 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3123 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3124 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3132 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3133 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3137 /* Do nothing here. */
3141 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3145 val
= ((unsigned HOST_WIDE_INT
) arg0
3146 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3150 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3154 val
= ((unsigned HOST_WIDE_INT
) arg0
3155 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3163 /* ??? There are simplifications that can be done. */
3170 return gen_int_mode (val
, mode
);
3178 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3181 Rather than test for specific case, we do this by a brute-force method
3182 and do all possible simplifications until no more changes occur. Then
3183 we rebuild the operation. */
3185 struct simplify_plus_minus_op_data
3192 simplify_plus_minus_op_data_cmp (const void *p1
, const void *p2
)
3194 const struct simplify_plus_minus_op_data
*d1
= p1
;
3195 const struct simplify_plus_minus_op_data
*d2
= p2
;
3198 result
= (commutative_operand_precedence (d2
->op
)
3199 - commutative_operand_precedence (d1
->op
));
3203 /* Group together equal REGs to do more simplification. */
3204 if (REG_P (d1
->op
) && REG_P (d2
->op
))
3205 return REGNO (d1
->op
) - REGNO (d2
->op
);
3211 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3214 struct simplify_plus_minus_op_data ops
[8];
3216 int n_ops
= 2, input_ops
= 2;
3217 int changed
, n_constants
= 0, canonicalized
= 0;
3220 memset (ops
, 0, sizeof ops
);
3222 /* Set up the two operands and then expand them until nothing has been
3223 changed. If we run out of room in our array, give up; this should
3224 almost never happen. */
3229 ops
[1].neg
= (code
== MINUS
);
3235 for (i
= 0; i
< n_ops
; i
++)
3237 rtx this_op
= ops
[i
].op
;
3238 int this_neg
= ops
[i
].neg
;
3239 enum rtx_code this_code
= GET_CODE (this_op
);
3248 ops
[n_ops
].op
= XEXP (this_op
, 1);
3249 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3252 ops
[i
].op
= XEXP (this_op
, 0);
3255 canonicalized
|= this_neg
;
3259 ops
[i
].op
= XEXP (this_op
, 0);
3260 ops
[i
].neg
= ! this_neg
;
3267 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3268 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3269 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3271 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3272 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3273 ops
[n_ops
].neg
= this_neg
;
3281 /* ~a -> (-a - 1) */
3284 ops
[n_ops
].op
= constm1_rtx
;
3285 ops
[n_ops
++].neg
= this_neg
;
3286 ops
[i
].op
= XEXP (this_op
, 0);
3287 ops
[i
].neg
= !this_neg
;
3297 ops
[i
].op
= neg_const_int (mode
, this_op
);
3311 if (n_constants
> 1)
3314 gcc_assert (n_ops
>= 2);
3316 /* If we only have two operands, we can avoid the loops. */
3319 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3322 /* Get the two operands. Be careful with the order, especially for
3323 the cases where code == MINUS. */
3324 if (ops
[0].neg
&& ops
[1].neg
)
3326 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3329 else if (ops
[0].neg
)
3340 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3343 /* Now simplify each pair of operands until nothing changes. */
3346 /* Insertion sort is good enough for an eight-element array. */
3347 for (i
= 1; i
< n_ops
; i
++)
3349 struct simplify_plus_minus_op_data save
;
3351 if (simplify_plus_minus_op_data_cmp (&ops
[j
], &ops
[i
]) < 0)
3357 ops
[j
+ 1] = ops
[j
];
3358 while (j
-- && simplify_plus_minus_op_data_cmp (&ops
[j
], &save
) > 0);
3362 /* This is only useful the first time through. */
3367 for (i
= n_ops
- 1; i
> 0; i
--)
3368 for (j
= i
- 1; j
>= 0; j
--)
3370 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3371 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3373 if (lhs
!= 0 && rhs
!= 0)
3375 enum rtx_code ncode
= PLUS
;
3381 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3383 else if (swap_commutative_operands_p (lhs
, rhs
))
3384 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3386 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3387 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3389 rtx tem_lhs
, tem_rhs
;
3391 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3392 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3393 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3395 if (tem
&& !CONSTANT_P (tem
))
3396 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3399 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3401 /* Reject "simplifications" that just wrap the two
3402 arguments in a CONST. Failure to do so can result
3403 in infinite recursion with simplify_binary_operation
3404 when it calls us to simplify CONST operations. */
3406 && ! (GET_CODE (tem
) == CONST
3407 && GET_CODE (XEXP (tem
, 0)) == ncode
3408 && XEXP (XEXP (tem
, 0), 0) == lhs
3409 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3412 if (GET_CODE (tem
) == NEG
)
3413 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3414 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3415 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3419 ops
[j
].op
= NULL_RTX
;
3425 /* Pack all the operands to the lower-numbered entries. */
3426 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3436 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3438 && GET_CODE (ops
[1].op
) == CONST_INT
3439 && CONSTANT_P (ops
[0].op
)
3441 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3443 /* We suppressed creation of trivial CONST expressions in the
3444 combination loop to avoid recursion. Create one manually now.
3445 The combination loop should have ensured that there is exactly
3446 one CONST_INT, and the sort will have ensured that it is last
3447 in the array and that any other constant will be next-to-last. */
3450 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3451 && CONSTANT_P (ops
[n_ops
- 2].op
))
3453 rtx value
= ops
[n_ops
- 1].op
;
3454 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3455 value
= neg_const_int (mode
, value
);
3456 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3460 /* Put a non-negated operand first, if possible. */
3462 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3465 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3474 /* Now make the result by performing the requested operations. */
3476 for (i
= 1; i
< n_ops
; i
++)
3477 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3478 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done, so it is the
   mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3517 if (SCALAR_FLOAT_MODE_P (mode
))
3519 if (tem
== const0_rtx
)
3520 return CONST0_RTX (mode
);
3521 #ifdef FLOAT_STORE_FLAG_VALUE
3523 REAL_VALUE_TYPE val
;
3524 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3525 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3531 if (VECTOR_MODE_P (mode
))
3533 if (tem
== const0_rtx
)
3534 return CONST0_RTX (mode
);
3535 #ifdef VECTOR_STORE_FLAG_VALUE
3540 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3541 if (val
== NULL_RTX
)
3543 if (val
== const1_rtx
)
3544 return CONST1_RTX (mode
);
3546 units
= GET_MODE_NUNITS (mode
);
3547 v
= rtvec_alloc (units
);
3548 for (i
= 0; i
< units
; i
++)
3549 RTVEC_ELT (v
, i
) = val
;
3550 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3560 /* For the following tests, ensure const0_rtx is op1. */
3561 if (swap_commutative_operands_p (op0
, op1
)
3562 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3563 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3565 /* If op0 is a compare, extract the comparison arguments from it. */
3566 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3567 return simplify_relational_operation (code
, mode
, VOIDmode
,
3568 XEXP (op0
, 0), XEXP (op0
, 1));
3570 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3574 trueop0
= avoid_constant_pool_reference (op0
);
3575 trueop1
= avoid_constant_pool_reference (op1
);
3576 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3580 /* This part of simplify_relational_operation is only used when CMP_MODE
3581 is not in class MODE_CC (i.e. it is a real comparison).
3583 MODE is the mode of the result, while CMP_MODE specifies in which
3584 mode the comparison is done in, so it is the mode of the operands. */
3587 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3588 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3590 enum rtx_code op0code
= GET_CODE (op0
);
3592 if (GET_CODE (op1
) == CONST_INT
)
3594 if (INTVAL (op1
) == 0 && COMPARISON_P (op0
))
3596 /* If op0 is a comparison, extract the comparison arguments
3600 if (GET_MODE (op0
) == mode
)
3601 return simplify_rtx (op0
);
3603 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3604 XEXP (op0
, 0), XEXP (op0
, 1));
3606 else if (code
== EQ
)
3608 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3609 if (new_code
!= UNKNOWN
)
3610 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3611 XEXP (op0
, 0), XEXP (op0
, 1));
3616 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3617 if ((code
== EQ
|| code
== NE
)
3618 && (op0code
== PLUS
|| op0code
== MINUS
)
3620 && CONSTANT_P (XEXP (op0
, 1))
3621 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3623 rtx x
= XEXP (op0
, 0);
3624 rtx c
= XEXP (op0
, 1);
3626 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3628 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3631 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3632 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3634 && op1
== const0_rtx
3635 && GET_MODE_CLASS (mode
) == MODE_INT
3636 && cmp_mode
!= VOIDmode
3637 /* ??? Work-around BImode bugs in the ia64 backend. */
3639 && cmp_mode
!= BImode
3640 && nonzero_bits (op0
, cmp_mode
) == 1
3641 && STORE_FLAG_VALUE
== 1)
3642 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3643 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3644 : lowpart_subreg (mode
, op0
, cmp_mode
);
3646 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3647 if ((code
== EQ
|| code
== NE
)
3648 && op1
== const0_rtx
3650 return simplify_gen_relational (code
, mode
, cmp_mode
,
3651 XEXP (op0
, 0), XEXP (op0
, 1));
3653 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3654 if ((code
== EQ
|| code
== NE
)
3656 && rtx_equal_p (XEXP (op0
, 0), op1
)
3657 && !side_effects_p (XEXP (op0
, 0)))
3658 return simplify_gen_relational (code
, mode
, cmp_mode
,
3659 XEXP (op0
, 1), const0_rtx
);
3661 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3662 if ((code
== EQ
|| code
== NE
)
3664 && rtx_equal_p (XEXP (op0
, 1), op1
)
3665 && !side_effects_p (XEXP (op0
, 1)))
3666 return simplify_gen_relational (code
, mode
, cmp_mode
,
3667 XEXP (op0
, 0), const0_rtx
);
3669 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3670 if ((code
== EQ
|| code
== NE
)
3672 && (GET_CODE (op1
) == CONST_INT
3673 || GET_CODE (op1
) == CONST_DOUBLE
)
3674 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3675 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3676 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3677 simplify_gen_binary (XOR
, cmp_mode
,
3678 XEXP (op0
, 1), op1
));
3683 /* Check if the given comparison (done in the given MODE) is actually a
3684 tautology or a contradiction.
3685 If no simplification is possible, this function returns zero.
3686 Otherwise, it returns either const_true_rtx or const0_rtx. */
3689 simplify_const_relational_operation (enum rtx_code code
,
3690 enum machine_mode mode
,
3693 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3698 gcc_assert (mode
!= VOIDmode
3699 || (GET_MODE (op0
) == VOIDmode
3700 && GET_MODE (op1
) == VOIDmode
));
3702 /* If op0 is a compare, extract the comparison arguments from it. */
3703 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3705 op1
= XEXP (op0
, 1);
3706 op0
= XEXP (op0
, 0);
3708 if (GET_MODE (op0
) != VOIDmode
)
3709 mode
= GET_MODE (op0
);
3710 else if (GET_MODE (op1
) != VOIDmode
)
3711 mode
= GET_MODE (op1
);
3716 /* We can't simplify MODE_CC values since we don't know what the
3717 actual comparison is. */
3718 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3721 /* Make sure the constant is second. */
3722 if (swap_commutative_operands_p (op0
, op1
))
3724 tem
= op0
, op0
= op1
, op1
= tem
;
3725 code
= swap_condition (code
);
3728 trueop0
= avoid_constant_pool_reference (op0
);
3729 trueop1
= avoid_constant_pool_reference (op1
);
3731 /* For integer comparisons of A and B maybe we can simplify A - B and can
3732 then simplify a comparison of that with zero. If A and B are both either
3733 a register or a CONST_INT, this can't help; testing for these cases will
3734 prevent infinite recursion here and speed things up.
3736 We can only do this for EQ and NE comparisons as otherwise we may
3737 lose or introduce overflow which we cannot disregard as undefined as
3738 we do not know the signedness of the operation on either the left or
3739 the right hand side of the comparison. */
3741 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3742 && (code
== EQ
|| code
== NE
)
3743 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3744 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3745 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3746 /* We cannot do this if tem is a nonzero address. */
3747 && ! nonzero_address_p (tem
))
3748 return simplify_const_relational_operation (signed_condition (code
),
3749 mode
, tem
, const0_rtx
);
3751 if (! HONOR_NANS (mode
) && code
== ORDERED
)
3752 return const_true_rtx
;
3754 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
3757 /* For modes without NaNs, if the two operands are equal, we know the
3758 result except if they have side-effects. */
3759 if (! HONOR_NANS (GET_MODE (trueop0
))
3760 && rtx_equal_p (trueop0
, trueop1
)
3761 && ! side_effects_p (trueop0
))
3762 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3764 /* If the operands are floating-point constants, see if we can fold
3766 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3767 && GET_CODE (trueop1
) == CONST_DOUBLE
3768 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3770 REAL_VALUE_TYPE d0
, d1
;
3772 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3773 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3775 /* Comparisons are unordered iff at least one of the values is NaN. */
3776 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3786 return const_true_rtx
;
3799 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3800 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3801 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3804 /* Otherwise, see if the operands are both integers. */
3805 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3806 && (GET_CODE (trueop0
) == CONST_DOUBLE
3807 || GET_CODE (trueop0
) == CONST_INT
)
3808 && (GET_CODE (trueop1
) == CONST_DOUBLE
3809 || GET_CODE (trueop1
) == CONST_INT
))
3811 int width
= GET_MODE_BITSIZE (mode
);
3812 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3813 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3815 /* Get the two words comprising each integer constant. */
3816 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3818 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3819 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3823 l0u
= l0s
= INTVAL (trueop0
);
3824 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3827 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3829 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3830 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3834 l1u
= l1s
= INTVAL (trueop1
);
3835 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
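      /* OP0LT/OP1LT give the signed ordering of the two constants and
         OP0LTU/OP1LTU the unsigned ordering; the high words decide unless
         they are equal, in which case the low words (always compared
         unsigned) break the tie.  */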
3861 /* Otherwise, there are some code-specific tests we can make. */
3864 /* Optimize comparisons with upper and lower bounds. */
3865 if (SCALAR_INT_MODE_P (mode
)
3866 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3879 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3886 /* x >= min is always true. */
3887 if (rtx_equal_p (trueop1
, mmin
))
3888 tem
= const_true_rtx
;
3894 /* x <= max is always true. */
3895 if (rtx_equal_p (trueop1
, mmax
))
3896 tem
= const_true_rtx
;
3901 /* x > max is always false. */
3902 if (rtx_equal_p (trueop1
, mmax
))
3908 /* x < min is always false. */
3909 if (rtx_equal_p (trueop1
, mmin
))
3916 if (tem
== const0_rtx
3917 || tem
== const_true_rtx
)
3924 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3929 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3930 return const_true_rtx
;
3934 /* Optimize abs(x) < 0.0. */
3935 if (trueop1
== CONST0_RTX (mode
)
3936 && !HONOR_SNANS (mode
)
3937 && (!INTEGRAL_MODE_P (mode
)
3938 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
3940 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3942 if (GET_CODE (tem
) == ABS
)
3944 if (INTEGRAL_MODE_P (mode
)
3945 && (issue_strict_overflow_warning
3946 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
3947 warning (OPT_Wstrict_overflow
,
3948 ("assuming signed overflow does not occur when "
3949 "assuming abs (x) < 0 is false"));
3956 /* Optimize abs(x) >= 0.0. */
3957 if (trueop1
== CONST0_RTX (mode
)
3958 && !HONOR_NANS (mode
)
3959 && (!INTEGRAL_MODE_P (mode
)
3960 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
3962 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3964 if (GET_CODE (tem
) == ABS
)
3966 if (INTEGRAL_MODE_P (mode
)
3967 && (issue_strict_overflow_warning
3968 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
3969 warning (OPT_Wstrict_overflow
,
3970 ("assuming signed overflow does not occur when "
3971 "assuming abs (x) >= 0 is true"));
3972 return const_true_rtx
;
3978 /* Optimize ! (abs(x) < 0.0). */
3979 if (trueop1
== CONST0_RTX (mode
))
3981 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3983 if (GET_CODE (tem
) == ABS
)
3984 return const_true_rtx
;
3995 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4001 return equal
? const_true_rtx
: const0_rtx
;
4004 return ! equal
? const_true_rtx
: const0_rtx
;
4007 return op0lt
? const_true_rtx
: const0_rtx
;
4010 return op1lt
? const_true_rtx
: const0_rtx
;
4012 return op0ltu
? const_true_rtx
: const0_rtx
;
4014 return op1ltu
? const_true_rtx
: const0_rtx
;
4017 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
4020 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
4022 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
4024 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
4026 return const_true_rtx
;
4034 /* Simplify CODE, an operation with result mode MODE and three operands,
4035 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4036 a constant. Return 0 if no simplifications is possible. */
4039 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
4040 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
4043 unsigned int width
= GET_MODE_BITSIZE (mode
);
4045 /* VOIDmode means "infinite" precision. */
4047 width
= HOST_BITS_PER_WIDE_INT
;
4053 if (GET_CODE (op0
) == CONST_INT
4054 && GET_CODE (op1
) == CONST_INT
4055 && GET_CODE (op2
) == CONST_INT
4056 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
4057 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
4059 /* Extracting a bit-field from a constant */
4060 HOST_WIDE_INT val
= INTVAL (op0
);
4062 if (BITS_BIG_ENDIAN
)
4063 val
>>= (GET_MODE_BITSIZE (op0_mode
)
4064 - INTVAL (op2
) - INTVAL (op1
));
4066 val
>>= INTVAL (op2
);
4068 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4070 /* First zero-extend. */
4071 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4072 /* If desired, propagate sign bit. */
4073 if (code
== SIGN_EXTRACT
4074 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4075 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4078 /* Clear the bits that don't belong in our mode,
4079 unless they and our sign bit are all one.
4080 So we get either a reasonable negative value or a reasonable
4081 unsigned value for this mode. */
4082 if (width
< HOST_BITS_PER_WIDE_INT
4083 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4084 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4085 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4087 return gen_int_mode (val
, mode
);
4092 if (GET_CODE (op0
) == CONST_INT
)
4093 return op0
!= const0_rtx
? op1
: op2
;
      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
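      /* For example, (if_then_else (ne A B) A B) simplifies to A, and
         (if_then_else (eq A B) A B) simplifies to B, provided NaNs and
         signed zeros need not be honored.  */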
4121 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
4123 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
4124 ? GET_MODE (XEXP (op0
, 1))
4125 : GET_MODE (XEXP (op0
, 0)));
4128 /* Look for happy constants in op1 and op2. */
4129 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
4131 HOST_WIDE_INT t
= INTVAL (op1
);
4132 HOST_WIDE_INT f
= INTVAL (op2
);
4134 if (t
== STORE_FLAG_VALUE
&& f
== 0)
4135 code
= GET_CODE (op0
);
4136 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
4139 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4147 return simplify_gen_relational (code
, mode
, cmp_mode
,
4148 XEXP (op0
, 0), XEXP (op0
, 1));
4151 if (cmp_mode
== VOIDmode
)
4152 cmp_mode
= op0_mode
;
4153 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4154 cmp_mode
, XEXP (op0
, 0),
4157 /* See if any simplifications were possible. */
4160 if (GET_CODE (temp
) == CONST_INT
)
4161 return temp
== const0_rtx
? op2
: op1
;
4163 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4169 gcc_assert (GET_MODE (op0
) == mode
);
4170 gcc_assert (GET_MODE (op1
) == mode
);
4171 gcc_assert (VECTOR_MODE_P (mode
));
4172 op2
= avoid_constant_pool_reference (op2
);
4173 if (GET_CODE (op2
) == CONST_INT
)
4175 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4176 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4177 int mask
= (1 << n_elts
) - 1;
4179 if (!(INTVAL (op2
) & mask
))
4181 if ((INTVAL (op2
) & mask
) == mask
)
4184 op0
= avoid_constant_pool_reference (op0
);
4185 op1
= avoid_constant_pool_reference (op1
);
4186 if (GET_CODE (op0
) == CONST_VECTOR
4187 && GET_CODE (op1
) == CONST_VECTOR
)
4189 rtvec v
= rtvec_alloc (n_elts
);
4192 for (i
= 0; i
< n_elts
; i
++)
4193 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4194 ? CONST_VECTOR_ELT (op0
, i
)
4195 : CONST_VECTOR_ELT (op1
, i
));
4196 return gen_rtx_CONST_VECTOR (mode
, v
);
4208 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4209 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4211 Works by unpacking OP into a collection of 8-bit values
4212 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4213 and then repacking them again for OUTERMODE. */
4216 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
4217 enum machine_mode innermode
, unsigned int byte
)
4219 /* We support up to 512-bit values (for V8DFmode). */
4223 value_mask
= (1 << value_bit
) - 1
4225 unsigned char value
[max_bitsize
/ value_bit
];
4234 rtvec result_v
= NULL
;
4235 enum mode_class outer_class
;
4236 enum machine_mode outer_submode
;
4238 /* Some ports misuse CCmode. */
4239 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
4242 /* We have no way to represent a complex constant at the rtl level. */
4243 if (COMPLEX_MODE_P (outermode
))
4246 /* Unpack the value. */
4248 if (GET_CODE (op
) == CONST_VECTOR
)
4250 num_elem
= CONST_VECTOR_NUNITS (op
);
4251 elems
= &CONST_VECTOR_ELT (op
, 0);
4252 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4258 elem_bitsize
= max_bitsize
;
4260 /* If this asserts, it is too complicated; reducing value_bit may help. */
4261 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4262 /* I don't know how to handle endianness of sub-units. */
4263 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
4265 for (elem
= 0; elem
< num_elem
; elem
++)
4268 rtx el
= elems
[elem
];
4270 /* Vectors are kept in target memory order. (This is probably
4273 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4274 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4276 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4277 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4278 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4279 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4280 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4283 switch (GET_CODE (el
))
4287 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4289 *vp
++ = INTVAL (el
) >> i
;
4290 /* CONST_INTs are always logically sign-extended. */
4291 for (; i
< elem_bitsize
; i
+= value_bit
)
4292 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
4296 if (GET_MODE (el
) == VOIDmode
)
4298 /* If this triggers, someone should have generated a
4299 CONST_INT instead. */
4300 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
4302 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
4303 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
4304 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
4307 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
4310 /* It shouldn't matter what's done here, so fill it with
4312 for (; i
< elem_bitsize
; i
+= value_bit
)
4317 long tmp
[max_bitsize
/ 32];
4318 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
4320 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
4321 gcc_assert (bitsize
<= elem_bitsize
);
4322 gcc_assert (bitsize
% value_bit
== 0);
4324 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
4327 /* real_to_target produces its result in words affected by
4328 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4329 and use WORDS_BIG_ENDIAN instead; see the documentation
4330 of SUBREG in rtl.texi. */
4331 for (i
= 0; i
< bitsize
; i
+= value_bit
)
4334 if (WORDS_BIG_ENDIAN
)
4335 ibase
= bitsize
- 1 - i
;
4338 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
4341 /* It shouldn't matter what's done here, so fill it with
4343 for (; i
< elem_bitsize
; i
+= value_bit
)
4353 /* Now, pick the right byte to start with. */
4354 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4355 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4356 will already have offset 0. */
4357 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
4359 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
4361 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4362 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4363 byte
= (subword_byte
% UNITS_PER_WORD
4364 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4367 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4368 so if it's become negative it will instead be very large.) */
4369 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4371 /* Convert from bytes to chunks of size value_bit. */
4372 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
4374 /* Re-pack the value. */
4376 if (VECTOR_MODE_P (outermode
))
4378 num_elem
= GET_MODE_NUNITS (outermode
);
4379 result_v
= rtvec_alloc (num_elem
);
4380 elems
= &RTVEC_ELT (result_v
, 0);
4381 outer_submode
= GET_MODE_INNER (outermode
);
4387 outer_submode
= outermode
;
4390 outer_class
= GET_MODE_CLASS (outer_submode
);
4391 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
4393 gcc_assert (elem_bitsize
% value_bit
== 0);
4394 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
4396 for (elem
= 0; elem
< num_elem
; elem
++)
4400 /* Vectors are stored in target memory order. (This is probably
4403 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4404 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4406 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4407 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4408 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4409 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4410 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4413 switch (outer_class
)
4416 case MODE_PARTIAL_INT
:
4418 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
4421 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4423 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
4424 for (; i
< elem_bitsize
; i
+= value_bit
)
4425 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
4426 << (i
- HOST_BITS_PER_WIDE_INT
));
4428 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4430 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
4431 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
4432 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
4433 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
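
  /* For instance, on a little-endian target the call above folds the
     lowpart QImode subreg of an SImode constant,
     (subreg:QI (const_int 0x12345678) 0), to (const_int 0x78).
     (Illustrative only; the exact result depends on the target's
     endianness and mode sizes.)  */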
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents an offset, as if the value were stored
         in memory.  The irritating exception is a paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
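
  /* For example (illustrative, hypothetical pseudo register), the block
     above folds a nested subreg such as
     (subreg:QI (subreg:HI (reg:SI 100) 0) 0) directly into
     (subreg:QI (reg:SI 100) 0), computing the combined byte offset once
     instead of stacking two SUBREGs.  */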
  /* Merge implicit and explicit truncations.  */
  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
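
  /* E.g. (subreg:QI (truncate:HI (reg:SI 100)) 0), the lowpart QImode
     subreg of an explicit HImode truncation, becomes a single
     (truncate:QI (reg:SI 100)).  (Illustrative example; little-endian
     byte numbering and a hypothetical pseudo register assumed.)  */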
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust the offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok a partial register anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
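
  /* Illustrative example (register numbers are hypothetical): on a target
     where hard registers 0 and 1 are consecutive SImode word registers
     holding a DImode value, (subreg:SI (reg:DI 0) 4) can be rewritten as
     (reg:SI 1) once subreg_regno_offset reports an offset of one register
     and HARD_REGNO_MODE_OK accepts SImode in register 1.  */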
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
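
  /* E.g. a narrowing subreg of a memory reference, such as
     (subreg:QI (mem:SI (reg:SI 101)) 2), becomes a narrower MEM at an
     adjusted address, (mem:QI (plus:SI (reg:SI 101) (const_int 2))).
     (Illustrative only; the address arithmetic and offset depend on the
     target.)  */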
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
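
  /* For instance (illustrative, hypothetical pseudo registers), with 4-byte
     SFmode the imaginary half of (concat:SC (reg:SF 102) (reg:SF 103)) is
     selected directly: (subreg:SF (concat:SC ...) 4) simplifies to
     (reg:SF 103), because byte 4 falls in the second part and the offset
     within that part is 0.  */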
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source has.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
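
  /* Illustrative examples of the three lowpart cases above, using
     hypothetical pseudo registers and little-endian byte numbering:
       (subreg:QI (zero_extend:SI (reg:QI 104)) 0) -> (reg:QI 104)
       (subreg:QI (zero_extend:SI (reg:HI 105)) 0)
         -> (subreg:QI (reg:HI 105) 0)
       (subreg:HI (zero_extend:SI (reg:QI 104)) 0)
         -> (zero_extend:HI (reg:QI 104))
     and a high-part subreg of a zero extension, e.g.
     (subreg:HI (zero_extend:SI (reg:QI 104)) 2), folds to (const_int 0).  */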
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
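
/* A minimal usage sketch (not from this file): a pass that needs the low
   SImode half of a DImode value VAL, and wants a folded result when one
   exists, might write

     rtx lo = simplify_gen_subreg (SImode, val, DImode,
                                   subreg_lowpart_offset (SImode, DImode));

   which returns a simplified rtx when simplify_subreg succeeds, a plain
   SUBREG when that is still valid, and NULL_RTX otherwise.  */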
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow pass-dependent state to be provided to these
           routines and add simplifications based on the pass-dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }