/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
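/* For example, HWI_SIGN_EXTEND applied to a LOW value whose top bit is
   set (i.e. negative when viewed as a HOST_WIDE_INT) yields -1, the
   all-ones high half of the sign-extended (low, high) pair; for any
   other LOW it yields 0.  */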
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
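/* For instance, in a 32-bit integer mode the only value accepted above
   is the constant whose low 32 bits are 0x80000000: the most
   significant bit of the mode, with every other bit clear.  */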
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
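/* Typical use: simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds
   straight to X through simplify_binary_operation, whereas a call whose
   operands cannot be folded simply returns the reordered (plus ...) rtx.  */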
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
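/* For example, replacing (reg R) by (const_int 4) inside
   (plus:SI (reg R) (const_int 1)) rebuilds the PLUS through
   simplify_gen_binary and therefore yields (const_int 5).  */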
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (code, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
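/* As an example of the canonicalization above, (plus (plus x (const_int 1))
   (const_int 2)) is reassociated to (plus x (const_int 3)): the operator is
   linearized to the left and the two constants are then folded together.  */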
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                     ? tem : orig;
            }
        }
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
1700 /* We can't assume x-x is 0 even with non-IEEE floating point,
1701 but since it is zero except in very strange circumstances, we
1702 will treat it as zero with -funsafe-math-optimizations. */
1703 if (rtx_equal_p (trueop0
, trueop1
)
1704 && ! side_effects_p (op0
)
1705 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1706 return CONST0_RTX (mode
);
1708 /* Change subtraction from zero into negation. (0 - x) is the
1709 same as -x when x is NaN, infinite, or finite and nonzero.
1710 But if the mode has signed zeros, and does not round towards
1711 -infinity, then 0 - 0 is 0, not -0. */
1712 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1713 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1715 /* (-1 - a) is ~a. */
1716 if (trueop0
== constm1_rtx
)
1717 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1719 /* Subtracting 0 has no effect unless the mode has signed zeros
1720 and supports rounding towards -infinity. In such a case,
1722 if (!(HONOR_SIGNED_ZEROS (mode
)
1723 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1724 && trueop1
== CONST0_RTX (mode
))
1727 /* See if this is something like X * C - X or vice versa or
1728 if the multiplication is written as a shift. If so, we can
1729 distribute and make a new multiply, shift, or maybe just
1730 have X (if C is 2 in the example above). But don't make
1731 something more expensive than we had before. */
1733 if (SCALAR_INT_MODE_P (mode
))
1735 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1736 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1737 rtx lhs
= op0
, rhs
= op1
;
1739 if (GET_CODE (lhs
) == NEG
)
1743 lhs
= XEXP (lhs
, 0);
1745 else if (GET_CODE (lhs
) == MULT
1746 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1748 coeff0l
= INTVAL (XEXP (lhs
, 1));
1749 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1750 lhs
= XEXP (lhs
, 0);
1752 else if (GET_CODE (lhs
) == ASHIFT
1753 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1754 && INTVAL (XEXP (lhs
, 1)) >= 0
1755 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1757 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1759 lhs
= XEXP (lhs
, 0);
1762 if (GET_CODE (rhs
) == NEG
)
1766 rhs
= XEXP (rhs
, 0);
1768 else if (GET_CODE (rhs
) == MULT
1769 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1771 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1772 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1773 rhs
= XEXP (rhs
, 0);
1775 else if (GET_CODE (rhs
) == ASHIFT
1776 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1777 && INTVAL (XEXP (rhs
, 1)) >= 0
1778 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1780 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1782 rhs
= XEXP (rhs
, 0);
1785 if (rtx_equal_p (lhs
, rhs
))
1787 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1789 unsigned HOST_WIDE_INT l
;
1792 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1793 coeff
= immed_double_const (l
, h
, mode
);
1795 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1796 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1801 /* (a - (-b)) -> (a + b). True even for IEEE. */
1802 if (GET_CODE (op1
) == NEG
)
1803 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1805 /* (-x - c) may be simplified as (-c - x). */
1806 if (GET_CODE (op0
) == NEG
1807 && (GET_CODE (op1
) == CONST_INT
1808 || GET_CODE (op1
) == CONST_DOUBLE
))
1810 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1812 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1815 /* Don't let a relocatable value get a negative coeff. */
1816 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1817 return simplify_gen_binary (PLUS
, mode
,
1819 neg_const_int (mode
, op1
));
1821 /* (x - (x & y)) -> (x & ~y) */
1822 if (GET_CODE (op1
) == AND
)
1824 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1826 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1827 GET_MODE (XEXP (op1
, 1)));
1828 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1830 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1832 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1833 GET_MODE (XEXP (op1
, 0)));
1834 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1838 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1839 by reversing the comparison code if valid. */
1840 if (STORE_FLAG_VALUE
== 1
1841 && trueop0
== const1_rtx
1842 && COMPARISON_P (op1
)
1843 && (reversed
= reversed_comparison (op1
, mode
)))
1846 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1847 if (GET_CODE (op1
) == MULT
1848 && GET_CODE (XEXP (op1
, 0)) == NEG
)
1852 in1
= XEXP (XEXP (op1
, 0), 0);
1853 in2
= XEXP (op1
, 1);
1854 return simplify_gen_binary (PLUS
, mode
,
1855 simplify_gen_binary (MULT
, mode
,
1860 /* Canonicalize (minus (neg A) (mult B C)) to
1861 (minus (mult (neg B) C) A). */
1862 if (GET_CODE (op1
) == MULT
1863 && GET_CODE (op0
) == NEG
)
1867 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
1868 in2
= XEXP (op1
, 1);
1869 return simplify_gen_binary (MINUS
, mode
,
1870 simplify_gen_binary (MULT
, mode
,
1875 /* If one of the operands is a PLUS or a MINUS, see if we can
1876 simplify this by the associative law. This will, for example,
1877 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1878 Don't use the associative law for floating point.
1879 The inaccuracy makes it nonassociative,
1880 and subtle programs can break if operations are associated. */
1882 if (INTEGRAL_MODE_P (mode
)
1883 && (plus_minus_operand_p (op0
)
1884 || plus_minus_operand_p (op1
))
1885 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1890 if (trueop1
== constm1_rtx
)
1891 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1893 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1894 x is NaN, since x * 0 is then also NaN. Nor is it valid
1895 when the mode has signed zeros, since multiplying a negative
1896 number by 0 will give -0, not 0. */
1897 if (!HONOR_NANS (mode
)
1898 && !HONOR_SIGNED_ZEROS (mode
)
1899 && trueop1
== CONST0_RTX (mode
)
1900 && ! side_effects_p (op0
))
1903 /* In IEEE floating point, x*1 is not equivalent to x for
1905 if (!HONOR_SNANS (mode
)
1906 && trueop1
== CONST1_RTX (mode
))
1909 /* Convert multiply by constant power of two into shift unless
1910 we are still generating RTL. This test is a kludge. */
1911 if (GET_CODE (trueop1
) == CONST_INT
1912 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1913 /* If the mode is larger than the host word size, and the
1914 uppermost bit is set, then this isn't a power of two due
1915 to implicit sign extension. */
1916 && (width
<= HOST_BITS_PER_WIDE_INT
1917 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1918 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1920 /* Likewise for multipliers wider than a word. */
1921 if (GET_CODE (trueop1
) == CONST_DOUBLE
1922 && (GET_MODE (trueop1
) == VOIDmode
1923 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1924 && GET_MODE (op0
) == mode
1925 && CONST_DOUBLE_LOW (trueop1
) == 0
1926 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
1927 return simplify_gen_binary (ASHIFT
, mode
, op0
,
1928 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
1930 /* x*2 is x+x and x*(-1) is -x */
1931 if (GET_CODE (trueop1
) == CONST_DOUBLE
1932 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
1933 && GET_MODE (op0
) == mode
)
1936 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1938 if (REAL_VALUES_EQUAL (d
, dconst2
))
1939 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1941 if (!HONOR_SNANS (mode
)
1942 && REAL_VALUES_EQUAL (d
, dconstm1
))
1943 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1946 /* Optimize -x * -x as x * x. */
1947 if (FLOAT_MODE_P (mode
)
1948 && GET_CODE (op0
) == NEG
1949 && GET_CODE (op1
) == NEG
1950 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
1951 && !side_effects_p (XEXP (op0
, 0)))
1952 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1954 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1955 if (SCALAR_FLOAT_MODE_P (mode
)
1956 && GET_CODE (op0
) == ABS
1957 && GET_CODE (op1
) == ABS
1958 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
1959 && !side_effects_p (XEXP (op0
, 0)))
1960 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
1962 /* Reassociate multiplication, but for floating point MULTs
1963 only when the user specifies unsafe math optimizations. */
1964 if (! FLOAT_MODE_P (mode
)
1965 || flag_unsafe_math_optimizations
)
1967 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1974 if (trueop1
== const0_rtx
)
1976 if (GET_CODE (trueop1
) == CONST_INT
1977 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1978 == GET_MODE_MASK (mode
)))
1980 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1982 /* A | (~A) -> -1 */
1983 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1984 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1985 && ! side_effects_p (op0
)
1986 && SCALAR_INT_MODE_P (mode
))
1989 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1990 if (GET_CODE (op1
) == CONST_INT
1991 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1992 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
1995 /* Convert (A & B) | A to A. */
1996 if (GET_CODE (op0
) == AND
1997 && (rtx_equal_p (XEXP (op0
, 0), op1
)
1998 || rtx_equal_p (XEXP (op0
, 1), op1
))
1999 && ! side_effects_p (XEXP (op0
, 0))
2000 && ! side_effects_p (XEXP (op0
, 1)))
2003 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2004 mode size to (rotate A CX). */
2006 if (GET_CODE (op1
) == ASHIFT
2007 || GET_CODE (op1
) == SUBREG
)
2018 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2019 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2020 && GET_CODE (XEXP (opleft
, 1)) == CONST_INT
2021 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2022 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2023 == GET_MODE_BITSIZE (mode
)))
2024 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2026 /* Same, but for ashift that has been "simplified" to a wider mode
2027 by simplify_shift_const. */
2029 if (GET_CODE (opleft
) == SUBREG
2030 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2031 && GET_CODE (opright
) == LSHIFTRT
2032 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2033 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2034 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2035 && (GET_MODE_SIZE (GET_MODE (opleft
))
2036 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2037 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2038 SUBREG_REG (XEXP (opright
, 0)))
2039 && GET_CODE (XEXP (SUBREG_REG (opleft
), 1)) == CONST_INT
2040 && GET_CODE (XEXP (opright
, 1)) == CONST_INT
2041 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2042 == GET_MODE_BITSIZE (mode
)))
2043 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2044 XEXP (SUBREG_REG (opleft
), 1));
2046 /* If we have (ior (and (X C1) C2)), simplify this by making
2047 C1 as small as possible if C1 actually changes. */
2048 if (GET_CODE (op1
) == CONST_INT
2049 && (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2050 || INTVAL (op1
) > 0)
2051 && GET_CODE (op0
) == AND
2052 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2053 && GET_CODE (op1
) == CONST_INT
2054 && (INTVAL (XEXP (op0
, 1)) & INTVAL (op1
)) != 0)
2055 return simplify_gen_binary (IOR
, mode
,
2057 (AND
, mode
, XEXP (op0
, 0),
2058 GEN_INT (INTVAL (XEXP (op0
, 1))
2062 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2063 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2064 the PLUS does not affect any of the bits in OP1: then we can do
2065 the IOR as a PLUS and we can associate. This is valid if OP1
2066 can be safely shifted left C bits. */
2067 if (GET_CODE (trueop1
) == CONST_INT
&& GET_CODE (op0
) == ASHIFTRT
2068 && GET_CODE (XEXP (op0
, 0)) == PLUS
2069 && GET_CODE (XEXP (XEXP (op0
, 0), 1)) == CONST_INT
2070 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2071 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2073 int count
= INTVAL (XEXP (op0
, 1));
2074 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2076 if (mask
>> count
== INTVAL (trueop1
)
2077 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2078 return simplify_gen_binary (ASHIFTRT
, mode
,
2079 plus_constant (XEXP (op0
, 0), mask
),
2083 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2089 if (trueop1
== const0_rtx
)
2091 if (GET_CODE (trueop1
) == CONST_INT
2092 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2093 == GET_MODE_MASK (mode
)))
2094 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2095 if (rtx_equal_p (trueop0
, trueop1
)
2096 && ! side_effects_p (op0
)
2097 && GET_MODE_CLASS (mode
) != MODE_CC
)
2098 return CONST0_RTX (mode
);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
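      /* Example (added for illustration): in QImode the sign bit is 0x80 and
         (xor x 0x80) equals (plus x 0x80) in 8-bit arithmetic, since adding
         0x80 flips the top bit and any carry out is discarded; the PLUS form
         is the canonical one.  */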
2105 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2106 if ((GET_CODE (op1
) == CONST_INT
2107 || GET_CODE (op1
) == CONST_DOUBLE
)
2108 && GET_CODE (op0
) == PLUS
2109 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
2110 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
2111 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2112 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2113 simplify_gen_binary (XOR
, mode
, op1
,
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
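      /* Example (added for illustration): if nonzero_bits proves op0 can only
         set bits 0x0f and op1 can only set bits 0xf0, then op0 ^ op1 computes
         the same value as op0 | op1, so the XOR is rewritten as an IOR.  */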
2125 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2126 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2129 int num_negated
= 0;
2131 if (GET_CODE (op0
) == NOT
)
2132 num_negated
++, op0
= XEXP (op0
, 0);
2133 if (GET_CODE (op1
) == NOT
)
2134 num_negated
++, op1
= XEXP (op1
, 0);
2136 if (num_negated
== 2)
2137 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2138 else if (num_negated
== 1)
2139 return simplify_gen_unary (NOT
, mode
,
2140 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2144 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2145 correspond to a machine insn or result in further simplifications
2146 if B is a constant. */
2148 if (GET_CODE (op0
) == AND
2149 && rtx_equal_p (XEXP (op0
, 1), op1
)
2150 && ! side_effects_p (op1
))
2151 return simplify_gen_binary (AND
, mode
,
2152 simplify_gen_unary (NOT
, mode
,
2153 XEXP (op0
, 0), mode
),
2156 else if (GET_CODE (op0
) == AND
2157 && rtx_equal_p (XEXP (op0
, 0), op1
)
2158 && ! side_effects_p (op1
))
2159 return simplify_gen_binary (AND
, mode
,
2160 simplify_gen_unary (NOT
, mode
,
2161 XEXP (op0
, 1), mode
),
2164 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2165 comparison if STORE_FLAG_VALUE is 1. */
2166 if (STORE_FLAG_VALUE
== 1
2167 && trueop1
== const1_rtx
2168 && COMPARISON_P (op0
)
2169 && (reversed
= reversed_comparison (op0
, mode
)))
2172 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2173 is (lt foo (const_int 0)), so we can perform the above
2174 simplification if STORE_FLAG_VALUE is 1. */
2176 if (STORE_FLAG_VALUE
== 1
2177 && trueop1
== const1_rtx
2178 && GET_CODE (op0
) == LSHIFTRT
2179 && GET_CODE (XEXP (op0
, 1)) == CONST_INT
2180 && INTVAL (XEXP (op0
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
2181 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2183 /* (xor (comparison foo bar) (const_int sign-bit))
2184 when STORE_FLAG_VALUE is the sign bit. */
2185 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2186 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
2187 == (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1))
2188 && trueop1
== const_true_rtx
2189 && COMPARISON_P (op0
)
2190 && (reversed
= reversed_comparison (op0
, mode
)))
2195 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2201 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2203 /* If we are turning off bits already known off in OP0, we need
2205 if (GET_CODE (trueop1
) == CONST_INT
2206 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2207 && (nonzero_bits (trueop0
, mode
) & ~INTVAL (trueop1
)) == 0)
2209 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2210 && GET_MODE_CLASS (mode
) != MODE_CC
)
2213 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2214 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2215 && ! side_effects_p (op0
)
2216 && GET_MODE_CLASS (mode
) != MODE_CC
)
2217 return CONST0_RTX (mode
);
2219 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2220 there are no nonzero bits of C outside of X's mode. */
2221 if ((GET_CODE (op0
) == SIGN_EXTEND
2222 || GET_CODE (op0
) == ZERO_EXTEND
)
2223 && GET_CODE (trueop1
) == CONST_INT
2224 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2225 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2226 & INTVAL (trueop1
)) == 0)
2228 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2229 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2230 gen_int_mode (INTVAL (trueop1
),
2232 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2235 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2236 insn (and may simplify more). */
2237 if (GET_CODE (op0
) == XOR
2238 && rtx_equal_p (XEXP (op0
, 0), op1
)
2239 && ! side_effects_p (op1
))
2240 return simplify_gen_binary (AND
, mode
,
2241 simplify_gen_unary (NOT
, mode
,
2242 XEXP (op0
, 1), mode
),
2245 if (GET_CODE (op0
) == XOR
2246 && rtx_equal_p (XEXP (op0
, 1), op1
)
2247 && ! side_effects_p (op1
))
2248 return simplify_gen_binary (AND
, mode
,
2249 simplify_gen_unary (NOT
, mode
,
2250 XEXP (op0
, 0), mode
),
2253 /* Similarly for (~(A ^ B)) & A. */
2254 if (GET_CODE (op0
) == NOT
2255 && GET_CODE (XEXP (op0
, 0)) == XOR
2256 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2257 && ! side_effects_p (op1
))
2258 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2260 if (GET_CODE (op0
) == NOT
2261 && GET_CODE (XEXP (op0
, 0)) == XOR
2262 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2263 && ! side_effects_p (op1
))
2264 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2266 /* Convert (A | B) & A to A. */
2267 if (GET_CODE (op0
) == IOR
2268 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2269 || rtx_equal_p (XEXP (op0
, 1), op1
))
2270 && ! side_effects_p (XEXP (op0
, 0))
2271 && ! side_effects_p (XEXP (op0
, 1)))
2274 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2275 ((A & N) + B) & M -> (A + B) & M
2276 Similarly if (N & M) == 0,
2277 ((A | N) + B) & M -> (A + B) & M
2278 and for - instead of + and/or ^ instead of |. */
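      /* Worked example (added for illustration): take M == 0xff (a low-bit
         mask) and N == 0xff00.  Then (N & M) == 0, so
         ((A | 0xff00) + B) & 0xff == (A + B) & 0xff: bits of A above the mask
         can only influence higher bits through carries, which never propagate
         downward into the masked byte.  */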
2279 if (GET_CODE (trueop1
) == CONST_INT
2280 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2281 && ~INTVAL (trueop1
)
2282 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
2283 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2288 pmop
[0] = XEXP (op0
, 0);
2289 pmop
[1] = XEXP (op0
, 1);
2291 for (which
= 0; which
< 2; which
++)
2294 switch (GET_CODE (tem
))
2297 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2298 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
2299 == INTVAL (trueop1
))
2300 pmop
[which
] = XEXP (tem
, 0);
2304 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
2305 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
2306 pmop
[which
] = XEXP (tem
, 0);
2313 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2315 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2317 return simplify_gen_binary (code
, mode
, tem
, op1
);
2320 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
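      /* Example (added for illustration): under this rule (presumably the
         unsigned-division case; the case label is not visible in this
         fragment) (udiv x 8) becomes (lshiftrt x 3), since
         exact_log2 (8) == 3.  */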
2343 /* Handle floating point and integers separately. */
2344 if (SCALAR_FLOAT_MODE_P (mode
))
2346 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2347 safe for modes with NaNs, since 0.0 / 0.0 will then be
2348 NaN rather than 0.0. Nor is it safe for modes with signed
2349 zeros, since dividing 0 by a negative number gives -0.0 */
2350 if (trueop0
== CONST0_RTX (mode
)
2351 && !HONOR_NANS (mode
)
2352 && !HONOR_SIGNED_ZEROS (mode
)
2353 && ! side_effects_p (op1
))
2356 if (trueop1
== CONST1_RTX (mode
)
2357 && !HONOR_SNANS (mode
))
2360 if (GET_CODE (trueop1
) == CONST_DOUBLE
2361 && trueop1
!= CONST0_RTX (mode
))
2364 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2367 if (REAL_VALUES_EQUAL (d
, dconstm1
)
2368 && !HONOR_SNANS (mode
))
2369 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2371 /* Change FP division by a constant into multiplication.
2372 Only do this with -funsafe-math-optimizations. */
2373 if (flag_unsafe_math_optimizations
2374 && !REAL_VALUES_EQUAL (d
, dconst0
))
2376 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
2377 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
2378 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
2384 /* 0/x is 0 (or x&0 if x has side-effects). */
2385 if (trueop0
== CONST0_RTX (mode
))
2387 if (side_effects_p (op1
))
2388 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2392 if (trueop1
== CONST1_RTX (mode
))
2393 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2395 if (trueop1
== constm1_rtx
)
2397 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2398 return simplify_gen_unary (NEG
, mode
, x
, mode
);
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
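      /* Example (added for illustration): (umod x 8) becomes (and x 7); for a
         power-of-two modulus the remainder is just the low-order bits of the
         dividend, which holds only for the unsigned form.  */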
2426 /* 0%x is 0 (or x&0 if x has side-effects). */
2427 if (trueop0
== CONST0_RTX (mode
))
2429 if (side_effects_p (op1
))
2430 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2433 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2434 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
2436 if (side_effects_p (op0
))
2437 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
2438 return CONST0_RTX (mode
);
2445 if (trueop1
== CONST0_RTX (mode
))
2447 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2449 /* Rotating ~0 always results in ~0. */
2450 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
2451 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2452 && ! side_effects_p (op1
))
2458 if (trueop1
== CONST0_RTX (mode
))
2460 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2465 if (trueop1
== CONST0_RTX (mode
))
2467 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2469 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2470 if (GET_CODE (op0
) == CLZ
2471 && GET_CODE (trueop1
) == CONST_INT
2472 && STORE_FLAG_VALUE
== 1
2473 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
2475 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2476 unsigned HOST_WIDE_INT zero_val
= 0;
2478 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
2479 && zero_val
== GET_MODE_BITSIZE (imode
)
2480 && INTVAL (trueop1
) == exact_log2 (zero_val
))
2481 return simplify_gen_relational (EQ
, mode
, imode
,
2482 XEXP (op0
, 0), const0_rtx
);
2487 if (width
<= HOST_BITS_PER_WIDE_INT
2488 && GET_CODE (trueop1
) == CONST_INT
2489 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2490 && ! side_effects_p (op0
))
2492 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2494 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2500 if (width
<= HOST_BITS_PER_WIDE_INT
2501 && GET_CODE (trueop1
) == CONST_INT
2502 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2503 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2504 && ! side_effects_p (op0
))
2506 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2508 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2514 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2516 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2518 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2524 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2526 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2528 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2537 /* ??? There are simplifications that can be done. */
2541 if (!VECTOR_MODE_P (mode
))
2543 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2544 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2545 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2546 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2547 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
2549 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2550 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2555 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2556 gcc_assert (GET_MODE_INNER (mode
)
2557 == GET_MODE_INNER (GET_MODE (trueop0
)));
2558 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2560 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2562 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2563 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2564 rtvec v
= rtvec_alloc (n_elts
);
2567 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2568 for (i
= 0; i
< n_elts
; i
++)
2570 rtx x
= XVECEXP (trueop1
, 0, i
);
2572 gcc_assert (GET_CODE (x
) == CONST_INT
);
2573 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2577 return gen_rtx_CONST_VECTOR (mode
, v
);
2581 if (XVECLEN (trueop1
, 0) == 1
2582 && GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
2583 && GET_CODE (trueop0
) == VEC_CONCAT
)
2586 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2588 /* Try to find the element in the VEC_CONCAT. */
2589 while (GET_MODE (vec
) != mode
2590 && GET_CODE (vec
) == VEC_CONCAT
)
2592 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2593 if (offset
< vec_size
)
2594 vec
= XEXP (vec
, 0);
2598 vec
= XEXP (vec
, 1);
2600 vec
= avoid_constant_pool_reference (vec
);
2603 if (GET_MODE (vec
) == mode
)
2610 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2611 ? GET_MODE (trueop0
)
2612 : GET_MODE_INNER (mode
));
2613 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2614 ? GET_MODE (trueop1
)
2615 : GET_MODE_INNER (mode
));
2617 gcc_assert (VECTOR_MODE_P (mode
));
2618 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2619 == GET_MODE_SIZE (mode
));
2621 if (VECTOR_MODE_P (op0_mode
))
2622 gcc_assert (GET_MODE_INNER (mode
)
2623 == GET_MODE_INNER (op0_mode
));
2625 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2627 if (VECTOR_MODE_P (op1_mode
))
2628 gcc_assert (GET_MODE_INNER (mode
)
2629 == GET_MODE_INNER (op1_mode
));
2631 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2633 if ((GET_CODE (trueop0
) == CONST_VECTOR
2634 || GET_CODE (trueop0
) == CONST_INT
2635 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2636 && (GET_CODE (trueop1
) == CONST_VECTOR
2637 || GET_CODE (trueop1
) == CONST_INT
2638 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2640 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2641 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2642 rtvec v
= rtvec_alloc (n_elts
);
2644 unsigned in_n_elts
= 1;
2646 if (VECTOR_MODE_P (op0_mode
))
2647 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2648 for (i
= 0; i
< n_elts
; i
++)
2652 if (!VECTOR_MODE_P (op0_mode
))
2653 RTVEC_ELT (v
, i
) = trueop0
;
2655 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2659 if (!VECTOR_MODE_P (op1_mode
))
2660 RTVEC_ELT (v
, i
) = trueop1
;
2662 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2667 return gen_rtx_CONST_VECTOR (mode
, v
);
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
2687 if (VECTOR_MODE_P (mode
)
2688 && code
!= VEC_CONCAT
2689 && GET_CODE (op0
) == CONST_VECTOR
2690 && GET_CODE (op1
) == CONST_VECTOR
)
2692 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2693 enum machine_mode op0mode
= GET_MODE (op0
);
2694 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2695 enum machine_mode op1mode
= GET_MODE (op1
);
2696 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2697 rtvec v
= rtvec_alloc (n_elts
);
2700 gcc_assert (op0_n_elts
== n_elts
);
2701 gcc_assert (op1_n_elts
== n_elts
);
2702 for (i
= 0; i
< n_elts
; i
++)
2704 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2705 CONST_VECTOR_ELT (op0
, i
),
2706 CONST_VECTOR_ELT (op1
, i
));
2709 RTVEC_ELT (v
, i
) = x
;
2712 return gen_rtx_CONST_VECTOR (mode
, v
);
2715 if (VECTOR_MODE_P (mode
)
2716 && code
== VEC_CONCAT
2717 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2719 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2720 rtvec v
= rtvec_alloc (n_elts
);
2722 gcc_assert (n_elts
>= 2);
2725 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2726 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2728 RTVEC_ELT (v
, 0) = op0
;
2729 RTVEC_ELT (v
, 1) = op1
;
2733 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2734 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2737 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2738 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2739 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2741 for (i
= 0; i
< op0_n_elts
; ++i
)
2742 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2743 for (i
= 0; i
< op1_n_elts
; ++i
)
2744 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2747 return gen_rtx_CONST_VECTOR (mode
, v
);
2750 if (SCALAR_FLOAT_MODE_P (mode
)
2751 && GET_CODE (op0
) == CONST_DOUBLE
2752 && GET_CODE (op1
) == CONST_DOUBLE
2753 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
2764 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
2766 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
2768 for (i
= 0; i
< 4; i
++)
2785 real_from_target (&r
, tmp0
, mode
);
2786 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
2790 REAL_VALUE_TYPE f0
, f1
, value
, result
;
2793 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
2794 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
2795 real_convert (&f0
, mode
, &f0
);
2796 real_convert (&f1
, mode
, &f1
);
2798 if (HONOR_SNANS (mode
)
2799 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
2803 && REAL_VALUES_EQUAL (f1
, dconst0
)
2804 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
2807 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2808 && flag_trapping_math
2809 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
2811 int s0
= REAL_VALUE_NEGATIVE (f0
);
2812 int s1
= REAL_VALUE_NEGATIVE (f1
);
2817 /* Inf + -Inf = NaN plus exception. */
2822 /* Inf - Inf = NaN plus exception. */
2827 /* Inf / Inf = NaN plus exception. */
2834 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
2835 && flag_trapping_math
2836 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
2837 || (REAL_VALUE_ISINF (f1
)
2838 && REAL_VALUES_EQUAL (f0
, dconst0
))))
2839 /* Inf * 0 = NaN plus exception. */
2842 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
2844 real_convert (&result
, mode
, &value
);
2846 /* Don't constant fold this floating point operation if
2847 the result has overflowed and flag_trapping_math. */
2849 if (flag_trapping_math
2850 && MODE_HAS_INFINITIES (mode
)
2851 && REAL_VALUE_ISINF (result
)
2852 && !REAL_VALUE_ISINF (f0
)
2853 && !REAL_VALUE_ISINF (f1
))
2854 /* Overflow plus exception. */
      /* Don't constant fold this floating point operation if the
         result may depend upon the run-time rounding mode and
         flag_rounding_math is set, or if GCC's software emulation
         is unable to accurately represent the result.  */
2862 if ((flag_rounding_math
2863 || (REAL_MODE_FORMAT_COMPOSITE_P (mode
)
2864 && !flag_unsafe_math_optimizations
))
2865 && (inexact
|| !real_identical (&result
, &value
)))
2868 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
2872 /* We can fold some multi-word operations. */
2873 if (GET_MODE_CLASS (mode
) == MODE_INT
2874 && width
== HOST_BITS_PER_WIDE_INT
* 2
2875 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2876 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2878 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2879 HOST_WIDE_INT h1
, h2
, hv
, ht
;
2881 if (GET_CODE (op0
) == CONST_DOUBLE
)
2882 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
2884 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
2886 if (GET_CODE (op1
) == CONST_DOUBLE
)
2887 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
2889 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
2894 /* A - B == A + (-B). */
2895 neg_double (l2
, h2
, &lv
, &hv
);
2898 /* Fall through.... */
2901 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2905 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2909 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2910 &lv
, &hv
, <
, &ht
))
2915 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2916 <
, &ht
, &lv
, &hv
))
2921 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2922 &lv
, &hv
, <
, &ht
))
2927 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2928 <
, &ht
, &lv
, &hv
))
2933 lv
= l1
& l2
, hv
= h1
& h2
;
2937 lv
= l1
| l2
, hv
= h1
| h2
;
2941 lv
= l1
^ l2
, hv
= h1
^ h2
;
2947 && ((unsigned HOST_WIDE_INT
) l1
2948 < (unsigned HOST_WIDE_INT
) l2
)))
2957 && ((unsigned HOST_WIDE_INT
) l1
2958 > (unsigned HOST_WIDE_INT
) l2
)))
2965 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
2967 && ((unsigned HOST_WIDE_INT
) l1
2968 < (unsigned HOST_WIDE_INT
) l2
)))
2975 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
2977 && ((unsigned HOST_WIDE_INT
) l1
2978 > (unsigned HOST_WIDE_INT
) l2
)))
2984 case LSHIFTRT
: case ASHIFTRT
:
2986 case ROTATE
: case ROTATERT
:
2987 if (SHIFT_COUNT_TRUNCATED
)
2988 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
2990 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
2993 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
2994 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
2996 else if (code
== ASHIFT
)
2997 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
2998 else if (code
== ROTATE
)
2999 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3000 else /* code == ROTATERT */
3001 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3008 return immed_double_const (lv
, hv
, mode
);
3011 if (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) == CONST_INT
3012 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3014 /* Get the integer argument values in two forms:
3015 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
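      /* Worked example (added for illustration): for an 8-bit mode with
         op0 == (const_int -1), the zero-extended form is arg0 == 0xff while
         the sign-extended form is arg0s == -1; the signed cases below use the
         *s values and the unsigned cases use arg0/arg1 directly.  */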
3017 arg0
= INTVAL (op0
);
3018 arg1
= INTVAL (op1
);
3020 if (width
< HOST_BITS_PER_WIDE_INT
)
3022 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3023 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3026 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3027 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3030 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3031 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3039 /* Compute the value of the arithmetic. */
3044 val
= arg0s
+ arg1s
;
3048 val
= arg0s
- arg1s
;
3052 val
= arg0s
* arg1s
;
3057 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3060 val
= arg0s
/ arg1s
;
3065 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3068 val
= arg0s
% arg1s
;
3073 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3076 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3081 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3084 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((HOST_WIDE_INT) -1) << (width - arg1);
          break;
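          /* Example (added for illustration): with SHIFT_COUNT_TRUNCATED and
             a 32-bit mode, a shift count of 37 is reduced to 37 % 32 == 5;
             without truncation an out-of-range count makes the fold bail out
             rather than guess a value.  */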
3128 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3129 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3137 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3138 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3142 /* Do nothing here. */
3146 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3150 val
= ((unsigned HOST_WIDE_INT
) arg0
3151 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3155 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3159 val
= ((unsigned HOST_WIDE_INT
) arg0
3160 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3168 /* ??? There are simplifications that can be done. */
3175 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;
  int result;

  result = (commutative_operand_precedence (d2->op)
            - commutative_operand_precedence (d1->op));
  if (result)
    return result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (d1->op) && REG_P (d2->op))
    return REGNO (d1->op) - REGNO (d2->op);
  else
    return 0;
}
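/* Illustrative note (not in the original source): this comparator orders
   more complex expressions before simpler ones, so constants drift toward
   the end of the operand array; e.g. {(const_int 4), (reg 60),
   (mult (reg 61) (reg 62))} sorts as the MULT first, then the REG, then the
   constant, which lets the combination loop below fold constants last.  */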
3216 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3219 struct simplify_plus_minus_op_data ops
[8];
3221 int n_ops
= 2, input_ops
= 2;
3222 int changed
, n_constants
= 0, canonicalized
= 0;
3225 memset (ops
, 0, sizeof ops
);
3227 /* Set up the two operands and then expand them until nothing has been
3228 changed. If we run out of room in our array, give up; this should
3229 almost never happen. */
3234 ops
[1].neg
= (code
== MINUS
);
3240 for (i
= 0; i
< n_ops
; i
++)
3242 rtx this_op
= ops
[i
].op
;
3243 int this_neg
= ops
[i
].neg
;
3244 enum rtx_code this_code
= GET_CODE (this_op
);
3253 ops
[n_ops
].op
= XEXP (this_op
, 1);
3254 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3257 ops
[i
].op
= XEXP (this_op
, 0);
3260 canonicalized
|= this_neg
;
3264 ops
[i
].op
= XEXP (this_op
, 0);
3265 ops
[i
].neg
= ! this_neg
;
3272 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3273 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3274 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3276 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3277 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3278 ops
[n_ops
].neg
= this_neg
;
3286 /* ~a -> (-a - 1) */
3289 ops
[n_ops
].op
= constm1_rtx
;
3290 ops
[n_ops
++].neg
= this_neg
;
3291 ops
[i
].op
= XEXP (this_op
, 0);
3292 ops
[i
].neg
= !this_neg
;
3302 ops
[i
].op
= neg_const_int (mode
, this_op
);
3316 if (n_constants
> 1)
3319 gcc_assert (n_ops
>= 2);
3321 /* If we only have two operands, we can avoid the loops. */
3324 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3327 /* Get the two operands. Be careful with the order, especially for
3328 the cases where code == MINUS. */
3329 if (ops
[0].neg
&& ops
[1].neg
)
3331 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3334 else if (ops
[0].neg
)
3345 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3348 /* Now simplify each pair of operands until nothing changes. */
3351 /* Insertion sort is good enough for an eight-element array. */
3352 for (i
= 1; i
< n_ops
; i
++)
3354 struct simplify_plus_minus_op_data save
;
3356 if (simplify_plus_minus_op_data_cmp (&ops
[j
], &ops
[i
]) < 0)
3362 ops
[j
+ 1] = ops
[j
];
3363 while (j
-- && simplify_plus_minus_op_data_cmp (&ops
[j
], &save
) > 0);
3367 /* This is only useful the first time through. */
3372 for (i
= n_ops
- 1; i
> 0; i
--)
3373 for (j
= i
- 1; j
>= 0; j
--)
3375 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3376 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3378 if (lhs
!= 0 && rhs
!= 0)
3380 enum rtx_code ncode
= PLUS
;
3386 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3388 else if (swap_commutative_operands_p (lhs
, rhs
))
3389 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3391 if ((GET_CODE (lhs
) == CONST
|| GET_CODE (lhs
) == CONST_INT
)
3392 && (GET_CODE (rhs
) == CONST
|| GET_CODE (rhs
) == CONST_INT
))
3394 rtx tem_lhs
, tem_rhs
;
3396 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3397 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3398 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3400 if (tem
&& !CONSTANT_P (tem
))
3401 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3404 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3406 /* Reject "simplifications" that just wrap the two
3407 arguments in a CONST. Failure to do so can result
3408 in infinite recursion with simplify_binary_operation
3409 when it calls us to simplify CONST operations. */
3411 && ! (GET_CODE (tem
) == CONST
3412 && GET_CODE (XEXP (tem
, 0)) == ncode
3413 && XEXP (XEXP (tem
, 0), 0) == lhs
3414 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3417 if (GET_CODE (tem
) == NEG
)
3418 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3419 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
3420 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3424 ops
[j
].op
= NULL_RTX
;
3430 /* Pack all the operands to the lower-numbered entries. */
3431 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3441 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3443 && GET_CODE (ops
[1].op
) == CONST_INT
3444 && CONSTANT_P (ops
[0].op
)
3446 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3448 /* We suppressed creation of trivial CONST expressions in the
3449 combination loop to avoid recursion. Create one manually now.
3450 The combination loop should have ensured that there is exactly
3451 one CONST_INT, and the sort will have ensured that it is last
3452 in the array and that any other constant will be next-to-last. */
3455 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
3456 && CONSTANT_P (ops
[n_ops
- 2].op
))
3458 rtx value
= ops
[n_ops
- 1].op
;
3459 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3460 value
= neg_const_int (mode
, value
);
3461 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3465 /* Put a non-negated operand first, if possible. */
3467 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3470 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3479 /* Now make the result by performing the requested operations. */
3481 for (i
= 1; i
< n_ops
; i
++)
3482 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3483 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
3512 rtx tem
, trueop0
, trueop1
;
3514 if (cmp_mode
== VOIDmode
)
3515 cmp_mode
= GET_MODE (op0
);
3516 if (cmp_mode
== VOIDmode
)
3517 cmp_mode
= GET_MODE (op1
);
3519 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3522 if (SCALAR_FLOAT_MODE_P (mode
))
3524 if (tem
== const0_rtx
)
3525 return CONST0_RTX (mode
);
3526 #ifdef FLOAT_STORE_FLAG_VALUE
3528 REAL_VALUE_TYPE val
;
3529 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3530 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3536 if (VECTOR_MODE_P (mode
))
3538 if (tem
== const0_rtx
)
3539 return CONST0_RTX (mode
);
3540 #ifdef VECTOR_STORE_FLAG_VALUE
3545 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3546 if (val
== NULL_RTX
)
3548 if (val
== const1_rtx
)
3549 return CONST1_RTX (mode
);
3551 units
= GET_MODE_NUNITS (mode
);
3552 v
= rtvec_alloc (units
);
3553 for (i
= 0; i
< units
; i
++)
3554 RTVEC_ELT (v
, i
) = val
;
3555 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3565 /* For the following tests, ensure const0_rtx is op1. */
3566 if (swap_commutative_operands_p (op0
, op1
)
3567 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3568 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3570 /* If op0 is a compare, extract the comparison arguments from it. */
3571 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3572 return simplify_relational_operation (code
, mode
, VOIDmode
,
3573 XEXP (op0
, 0), XEXP (op0
, 1));
3575 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3579 trueop0
= avoid_constant_pool_reference (op0
);
3580 trueop1
= avoid_constant_pool_reference (op1
);
3581 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3585 /* This part of simplify_relational_operation is only used when CMP_MODE
3586 is not in class MODE_CC (i.e. it is a real comparison).
3588 MODE is the mode of the result, while CMP_MODE specifies in which
3589 mode the comparison is done in, so it is the mode of the operands. */
3592 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3593 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3595 enum rtx_code op0code
= GET_CODE (op0
);
3597 if (GET_CODE (op1
) == CONST_INT
)
3599 if (INTVAL (op1
) == 0 && COMPARISON_P (op0
))
3601 /* If op0 is a comparison, extract the comparison arguments
3605 if (GET_MODE (op0
) == mode
)
3606 return simplify_rtx (op0
);
3608 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3609 XEXP (op0
, 0), XEXP (op0
, 1));
3611 else if (code
== EQ
)
3613 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3614 if (new_code
!= UNKNOWN
)
3615 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3616 XEXP (op0
, 0), XEXP (op0
, 1));
3621 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3622 if ((code
== EQ
|| code
== NE
)
3623 && (op0code
== PLUS
|| op0code
== MINUS
)
3625 && CONSTANT_P (XEXP (op0
, 1))
3626 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
3628 rtx x
= XEXP (op0
, 0);
3629 rtx c
= XEXP (op0
, 1);
3631 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
3633 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
3636 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3637 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3639 && op1
== const0_rtx
3640 && GET_MODE_CLASS (mode
) == MODE_INT
3641 && cmp_mode
!= VOIDmode
3642 /* ??? Work-around BImode bugs in the ia64 backend. */
3644 && cmp_mode
!= BImode
3645 && nonzero_bits (op0
, cmp_mode
) == 1
3646 && STORE_FLAG_VALUE
== 1)
3647 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
3648 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
3649 : lowpart_subreg (mode
, op0
, cmp_mode
);
3651 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3652 if ((code
== EQ
|| code
== NE
)
3653 && op1
== const0_rtx
3655 return simplify_gen_relational (code
, mode
, cmp_mode
,
3656 XEXP (op0
, 0), XEXP (op0
, 1));
3658 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3659 if ((code
== EQ
|| code
== NE
)
3661 && rtx_equal_p (XEXP (op0
, 0), op1
)
3662 && !side_effects_p (XEXP (op0
, 0)))
3663 return simplify_gen_relational (code
, mode
, cmp_mode
,
3664 XEXP (op0
, 1), const0_rtx
);
3666 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3667 if ((code
== EQ
|| code
== NE
)
3669 && rtx_equal_p (XEXP (op0
, 1), op1
)
3670 && !side_effects_p (XEXP (op0
, 1)))
3671 return simplify_gen_relational (code
, mode
, cmp_mode
,
3672 XEXP (op0
, 0), const0_rtx
);
3674 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3675 if ((code
== EQ
|| code
== NE
)
3677 && (GET_CODE (op1
) == CONST_INT
3678 || GET_CODE (op1
) == CONST_DOUBLE
)
3679 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
3680 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
3681 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
3682 simplify_gen_binary (XOR
, cmp_mode
,
3683 XEXP (op0
, 1), op1
));
3688 /* Check if the given comparison (done in the given MODE) is actually a
3689 tautology or a contradiction.
3690 If no simplification is possible, this function returns zero.
3691 Otherwise, it returns either const_true_rtx or const0_rtx. */
3694 simplify_const_relational_operation (enum rtx_code code
,
3695 enum machine_mode mode
,
3698 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3703 gcc_assert (mode
!= VOIDmode
3704 || (GET_MODE (op0
) == VOIDmode
3705 && GET_MODE (op1
) == VOIDmode
));
3707 /* If op0 is a compare, extract the comparison arguments from it. */
3708 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3710 op1
= XEXP (op0
, 1);
3711 op0
= XEXP (op0
, 0);
3713 if (GET_MODE (op0
) != VOIDmode
)
3714 mode
= GET_MODE (op0
);
3715 else if (GET_MODE (op1
) != VOIDmode
)
3716 mode
= GET_MODE (op1
);
3721 /* We can't simplify MODE_CC values since we don't know what the
3722 actual comparison is. */
3723 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3726 /* Make sure the constant is second. */
3727 if (swap_commutative_operands_p (op0
, op1
))
3729 tem
= op0
, op0
= op1
, op1
= tem
;
3730 code
= swap_condition (code
);
3733 trueop0
= avoid_constant_pool_reference (op0
);
3734 trueop1
= avoid_constant_pool_reference (op1
);
3736 /* For integer comparisons of A and B maybe we can simplify A - B and can
3737 then simplify a comparison of that with zero. If A and B are both either
3738 a register or a CONST_INT, this can't help; testing for these cases will
3739 prevent infinite recursion here and speed things up.
3741 We can only do this for EQ and NE comparisons as otherwise we may
3742 lose or introduce overflow which we cannot disregard as undefined as
3743 we do not know the signedness of the operation on either the left or
3744 the right hand side of the comparison. */
3746 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3747 && (code
== EQ
|| code
== NE
)
3748 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3749 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3750 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3751 /* We cannot do this if tem is a nonzero address. */
3752 && ! nonzero_address_p (tem
))
3753 return simplify_const_relational_operation (signed_condition (code
),
3754 mode
, tem
, const0_rtx
);
3756 if (! HONOR_NANS (mode
) && code
== ORDERED
)
3757 return const_true_rtx
;
3759 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
3762 /* For modes without NaNs, if the two operands are equal, we know the
3763 result except if they have side-effects. */
3764 if (! HONOR_NANS (GET_MODE (trueop0
))
3765 && rtx_equal_p (trueop0
, trueop1
)
3766 && ! side_effects_p (trueop0
))
3767 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3769 /* If the operands are floating-point constants, see if we can fold
3771 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3772 && GET_CODE (trueop1
) == CONST_DOUBLE
3773 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3775 REAL_VALUE_TYPE d0
, d1
;
3777 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3778 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3780 /* Comparisons are unordered iff at least one of the values is NaN. */
3781 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3791 return const_true_rtx
;
3804 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3805 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3806 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3809 /* Otherwise, see if the operands are both integers. */
3810 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3811 && (GET_CODE (trueop0
) == CONST_DOUBLE
3812 || GET_CODE (trueop0
) == CONST_INT
)
3813 && (GET_CODE (trueop1
) == CONST_DOUBLE
3814 || GET_CODE (trueop1
) == CONST_INT
))
3816 int width
= GET_MODE_BITSIZE (mode
);
3817 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3818 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3820 /* Get the two words comprising each integer constant. */
3821 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3823 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3824 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3828 l0u
= l0s
= INTVAL (trueop0
);
3829 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3832 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3834 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3835 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3839 l1u
= l1s
= INTVAL (trueop1
);
3840 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3843 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3844 we have to sign or zero-extend the values. */
3845 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
3847 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3848 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3850 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3851 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3853 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3854 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3856 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
3857 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
3859 equal
= (h0u
== h1u
&& l0u
== l1u
);
3860 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
3861 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
3862 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
3863 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
3866 /* Otherwise, there are some code-specific tests we can make. */
3869 /* Optimize comparisons with upper and lower bounds. */
3870 if (SCALAR_INT_MODE_P (mode
)
3871 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3884 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3891 /* x >= min is always true. */
3892 if (rtx_equal_p (trueop1
, mmin
))
3893 tem
= const_true_rtx
;
3899 /* x <= max is always true. */
3900 if (rtx_equal_p (trueop1
, mmax
))
3901 tem
= const_true_rtx
;
3906 /* x > max is always false. */
3907 if (rtx_equal_p (trueop1
, mmax
))
3913 /* x < min is always false. */
3914 if (rtx_equal_p (trueop1
, mmin
))
3921 if (tem
== const0_rtx
3922 || tem
== const_true_rtx
)
3929 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3934 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3935 return const_true_rtx
;
3939 /* Optimize abs(x) < 0.0. */
3940 if (trueop1
== CONST0_RTX (mode
)
3941 && !HONOR_SNANS (mode
)
3942 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3944 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3946 if (GET_CODE (tem
) == ABS
)
3952 /* Optimize abs(x) >= 0.0. */
3953 if (trueop1
== CONST0_RTX (mode
)
3954 && !HONOR_NANS (mode
)
3955 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3957 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3959 if (GET_CODE (tem
) == ABS
)
3960 return const_true_rtx
;
3965 /* Optimize ! (abs(x) < 0.0). */
3966 if (trueop1
== CONST0_RTX (mode
))
3968 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3970 if (GET_CODE (tem
) == ABS
)
3971 return const_true_rtx
;
3982 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3988 return equal
? const_true_rtx
: const0_rtx
;
3991 return ! equal
? const_true_rtx
: const0_rtx
;
3994 return op0lt
? const_true_rtx
: const0_rtx
;
3997 return op1lt
? const_true_rtx
: const0_rtx
;
3999 return op0ltu
? const_true_rtx
: const0_rtx
;
4001 return op1ltu
? const_true_rtx
: const0_rtx
;
4004 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
4007 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
4009 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
4011 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
4013 return const_true_rtx
;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
4040 if (GET_CODE (op0
) == CONST_INT
4041 && GET_CODE (op1
) == CONST_INT
4042 && GET_CODE (op2
) == CONST_INT
4043 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
4044 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
4046 /* Extracting a bit-field from a constant */
4047 HOST_WIDE_INT val
= INTVAL (op0
);
4049 if (BITS_BIG_ENDIAN
)
4050 val
>>= (GET_MODE_BITSIZE (op0_mode
)
4051 - INTVAL (op2
) - INTVAL (op1
));
4053 val
>>= INTVAL (op2
);
4055 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4057 /* First zero-extend. */
4058 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4059 /* If desired, propagate sign bit. */
4060 if (code
== SIGN_EXTRACT
4061 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4062 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4065 /* Clear the bits that don't belong in our mode,
4066 unless they and our sign bit are all one.
4067 So we get either a reasonable negative value or a reasonable
4068 unsigned value for this mode. */
4069 if (width
< HOST_BITS_PER_WIDE_INT
4070 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4071 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4072 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4074 return gen_int_mode (val
, mode
);
4079 if (GET_CODE (op0
) == CONST_INT
)
4080 return op0
!= const0_rtx
? op1
: op2
;
4082 /* Convert c ? a : a into "a". */
4083 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
4086 /* Convert a != b ? a : b into "a". */
4087 if (GET_CODE (op0
) == NE
4088 && ! side_effects_p (op0
)
4089 && ! HONOR_NANS (mode
)
4090 && ! HONOR_SIGNED_ZEROS (mode
)
4091 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4092 && rtx_equal_p (XEXP (op0
, 1), op2
))
4093 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4094 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4097 /* Convert a == b ? a : b into "b". */
4098 if (GET_CODE (op0
) == EQ
4099 && ! side_effects_p (op0
)
4100 && ! HONOR_NANS (mode
)
4101 && ! HONOR_SIGNED_ZEROS (mode
)
4102 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4103 && rtx_equal_p (XEXP (op0
, 1), op2
))
4104 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4105 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4108 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
4110 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
4111 ? GET_MODE (XEXP (op0
, 1))
4112 : GET_MODE (XEXP (op0
, 0)));
4115 /* Look for happy constants in op1 and op2. */
4116 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
4118 HOST_WIDE_INT t
= INTVAL (op1
);
4119 HOST_WIDE_INT f
= INTVAL (op2
);
4121 if (t
== STORE_FLAG_VALUE
&& f
== 0)
4122 code
= GET_CODE (op0
);
4123 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
4126 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4134 return simplify_gen_relational (code
, mode
, cmp_mode
,
4135 XEXP (op0
, 0), XEXP (op0
, 1));
4138 if (cmp_mode
== VOIDmode
)
4139 cmp_mode
= op0_mode
;
4140 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4141 cmp_mode
, XEXP (op0
, 0),
4144 /* See if any simplifications were possible. */
4147 if (GET_CODE (temp
) == CONST_INT
)
4148 return temp
== const0_rtx
? op2
: op1
;
4150 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4156 gcc_assert (GET_MODE (op0
) == mode
);
4157 gcc_assert (GET_MODE (op1
) == mode
);
4158 gcc_assert (VECTOR_MODE_P (mode
));
4159 op2
= avoid_constant_pool_reference (op2
);
4160 if (GET_CODE (op2
) == CONST_INT
)
4162 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4163 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4164 int mask
= (1 << n_elts
) - 1;
4166 if (!(INTVAL (op2
) & mask
))
4168 if ((INTVAL (op2
) & mask
) == mask
)
4171 op0
= avoid_constant_pool_reference (op0
);
4172 op1
= avoid_constant_pool_reference (op1
);
4173 if (GET_CODE (op0
) == CONST_VECTOR
4174 && GET_CODE (op1
) == CONST_VECTOR
)
4176 rtvec v
= rtvec_alloc (n_elts
);
4179 for (i
= 0; i
< n_elts
; i
++)
4180 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4181 ? CONST_VECTOR_ELT (op0
, i
)
4182 : CONST_VECTOR_ELT (op1
, i
));
4183 return gen_rtx_CONST_VECTOR (mode
, v
);
4195 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4196 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4198 Works by unpacking OP into a collection of 8-bit values
4199 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4200 and then repacking them again for OUTERMODE. */
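/* Illustrative note (not in the original source): conceptually, on a
   little-endian target (subreg:QI (const_int 0x1234) 0) unpacks 0x1234 into
   the byte array {0x34, 0x12, ...}, selects the byte at offset 0, and
   repacks it, yielding (const_int 0x34).  */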
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
4221 rtvec result_v
= NULL
;
4222 enum mode_class outer_class
;
4223 enum machine_mode outer_submode
;
4225 /* Some ports misuse CCmode. */
4226 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
4229 /* We have no way to represent a complex constant at the rtl level. */
4230 if (COMPLEX_MODE_P (outermode
))
4233 /* Unpack the value. */
4235 if (GET_CODE (op
) == CONST_VECTOR
)
4237 num_elem
= CONST_VECTOR_NUNITS (op
);
4238 elems
= &CONST_VECTOR_ELT (op
, 0);
4239 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4245 elem_bitsize
= max_bitsize
;
4247 /* If this asserts, it is too complicated; reducing value_bit may help. */
4248 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4249 /* I don't know how to handle endianness of sub-units. */
4250 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
4252 for (elem
= 0; elem
< num_elem
; elem
++)
4255 rtx el
= elems
[elem
];
4257 /* Vectors are kept in target memory order. (This is probably
4260 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4261 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4263 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4264 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4265 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4266 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4267 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4270 switch (GET_CODE (el
))
4274 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4276 *vp
++ = INTVAL (el
) >> i
;
4277 /* CONST_INTs are always logically sign-extended. */
4278 for (; i
< elem_bitsize
; i
+= value_bit
)
4279 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
4283 if (GET_MODE (el
) == VOIDmode
)
4285 /* If this triggers, someone should have generated a
4286 CONST_INT instead. */
4287 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
4289 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
4290 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
4291 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
4294 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
4297 /* It shouldn't matter what's done here, so fill it with
4299 for (; i
< elem_bitsize
; i
+= value_bit
)
4304 long tmp
[max_bitsize
/ 32];
4305 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
4307 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
4308 gcc_assert (bitsize
<= elem_bitsize
);
4309 gcc_assert (bitsize
% value_bit
== 0);
4311 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
4314 /* real_to_target produces its result in words affected by
4315 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4316 and use WORDS_BIG_ENDIAN instead; see the documentation
4317 of SUBREG in rtl.texi. */
4318 for (i
= 0; i
< bitsize
; i
+= value_bit
)
4321 if (WORDS_BIG_ENDIAN
)
4322 ibase
= bitsize
- 1 - i
;
4325 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
4328 /* It shouldn't matter what's done here, so fill it with
4330 for (; i
< elem_bitsize
; i
+= value_bit
)
4340 /* Now, pick the right byte to start with. */
4341 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4342 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4343 will already have offset 0. */
4344 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
4346 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
4348 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4349 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4350 byte
= (subword_byte
% UNITS_PER_WORD
4351 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4354 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4355 so if it's become negative it will instead be very large.) */
4356 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4358 /* Convert from bytes to chunks of size value_bit. */
4359 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
4361 /* Re-pack the value. */
4363 if (VECTOR_MODE_P (outermode
))
4365 num_elem
= GET_MODE_NUNITS (outermode
);
4366 result_v
= rtvec_alloc (num_elem
);
4367 elems
= &RTVEC_ELT (result_v
, 0);
4368 outer_submode
= GET_MODE_INNER (outermode
);
4374 outer_submode
= outermode
;
4377 outer_class
= GET_MODE_CLASS (outer_submode
);
4378 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
4380 gcc_assert (elem_bitsize
% value_bit
== 0);
4381 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
4383 for (elem
= 0; elem
< num_elem
; elem
++)
4387 /* Vectors are stored in target memory order. (This is probably
4390 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4391 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4393 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4394 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4395 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4396 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4397 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4400 switch (outer_class
)
4403 case MODE_PARTIAL_INT
:
4405 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
4408 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4410 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
4411 for (; i
< elem_bitsize
; i
+= value_bit
)
4412 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
4413 << (i
- HOST_BITS_PER_WIDE_INT
));
4415 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4417 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
4418 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
4419 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
4420 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
4427 case MODE_DECIMAL_FLOAT
:
4430 long tmp
[max_bitsize
/ 32];
4432 /* real_from_target wants its input in words affected by
4433 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4434 and use WORDS_BIG_ENDIAN instead; see the documentation
4435 of SUBREG in rtl.texi. */
4436 for (i
= 0; i
< max_bitsize
/ 32; i
++)
4438 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
4441 if (WORDS_BIG_ENDIAN
)
4442 ibase
= elem_bitsize
- 1 - i
;
4445 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
4448 real_from_target (&r
, tmp
, outer_submode
);
4449 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
4457 if (VECTOR_MODE_P (outermode
))
4458 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
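  /* For instance, assuming a little-endian target, an SImode constant viewed
     through (subreg:QI ... 0) folds here to its low byte:
     simplify_immed_subreg (QImode, (const_int 0x1234), SImode, 0)
     yields (const_int 0x34).  */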
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);
      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
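  /* The case above collapses nested subregs; e.g. on a little-endian target,
     with R a pseudo register assumed only for the example,
     (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes the single
     (subreg:QI (reg:SI R) 0).  */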
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
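  /* E.g. on a little-endian target the lowpart subreg of a truncation,
     (subreg:QI (truncate:HI (reg:SI R)) 0), is rewritten as the single
     truncation (truncate:QI (reg:SI R)).  */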
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);
      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grog partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
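  /* Illustration, assuming a little-endian target and an ordinary data
     register 2 that is valid in both modes: (subreg:SI (reg:DI 2) 0)
     simplifies here to (reg:SI 2), with subreg_regno_offset selecting the
     hard register that actually holds the requested word.  */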
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
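  /* E.g. (subreg:QI (mem:SI A) 3) becomes a QImode memory reference three
     bytes into the original MEM (via adjust_address_nv), provided the
     address A is not mode-dependent.  */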
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
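  /* Illustration with 4-byte SFmode parts: (subreg:SF (concat:SC R I) 0)
     selects the real part R, while (subreg:SF (concat:SC R I) 4) selects
     the imaginary part I.  */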
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
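  /* Two concrete cases of the above, assuming a little-endian target:
     (subreg:QI (zero_extend:SI (reg:QI X)) 0) reduces to (reg:QI X), and
     (subreg:HI (zero_extend:SI (reg:QI X)) 2), which reads only zeroed
     high bits, folds to (const_int 0).  */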
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
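  /* Concrete instance, assuming a little-endian target:
     (subreg:QI (ashiftrt:SI (sign_extend:SI (reg:QI X)) (const_int 2)) 0)
     becomes (ashiftrt:QI (reg:QI X) (const_int 2)).  */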
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;
  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
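/* Typical use: simplify_gen_subreg (SImode, x, DImode, 0) returns a folded
   SImode rtx for the first four bytes of X when simplify_subreg can compute
   one, and otherwise returns a fresh SUBREG only if validate_subreg accepts
   the mode/offset combination.  */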
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))