/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
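/* Editor's illustrative sketch (not part of the original source): with a
   64-bit HOST_WIDE_INT, a double-width constant is carried as a (low, high)
   pair, and the macro fills in the high half as if low were signed:

     unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
     HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);   becomes -1, since low < 0
     low = 5;
     high = HWI_SIGN_EXTEND (low);                 becomes 0              */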
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
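/* Editor's note (illustrative, not in the original source): the truncation
   matters for the most negative value of a mode.  In QImode, for example,
   negating (const_int -128) wraps back to the same constant:

     rtx x = neg_const_int (QImode, GEN_INT (-128));
     gen_int_mode truncates 128 to QImode, so x is (const_int -128) again.  */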
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
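/* Editor's example (illustrative): for 32-bit SImode, mode_signbit_p is true
   exactly for the constant whose low 32 bits are 0x80000000, i.e. the value
   (unsigned HOST_WIDE_INT) 1 << 31, the most significant bit of SImode.  */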
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
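/* Editor's usage sketch (illustrative): callers hand arbitrary operands to
   simplify_gen_binary and get back either a folded rtx or a freshly built
   expression, e.g.

     rtx sum = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

   which typically comes back as just `reg' via the x + 0 rule applied in
   simplify_binary_operation_1 later in this file.  */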
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
	  || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp = 0, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  if (MEM_OFFSET (x))
	    offset += INTVAL (MEM_OFFSET (x));

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
   is an expression in X that is equal to OLD_RTX.  Canonicalize and
   simplify the result.

   If FN is null, assume FN (X', DATA) == copy_rtx (DATA).  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  /* If X is OLD_RTX, return FN (X, DATA), with a null FN.  Otherwise,
     if this is an expression, try to build a new expression, substituting
     recursively.  If we can't do anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    {
      if (fn)
	return fn (x, data);
      else
	return copy_rtx ((rtx) data);
    }

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
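/* Editor's usage sketch (illustrative, not part of the original source):
   substituting a known constant for a register and letting the result fold:

     rtx e = gen_rtx_PLUS (SImode, reg, const1_rtx);
     rtx f = simplify_replace_rtx (e, reg, GEN_INT (41));

   Here f comes back as (const_int 42), because the rebuilt PLUS is passed
   through simplify_gen_binary and constant-folded.  */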
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
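/* Editor's note (illustrative): the dispatcher above splits constant folding
   from algebraic rewriting.  For example,
     simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
   is folded by simplify_const_unary_operation to (const_int -5), while a
   pattern such as (neg (neg X)) for non-constant X is handled below in
   simplify_unary_operation_1.  */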
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}
      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);
      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
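/* Editor's summary examples (illustrative): a few of the rewrites performed
   above, written as RTL patterns and assuming 32-bit SImode:

     (not (not X))                        -> X
     (not (plus X -1))                    -> (neg X)
     (neg (lshiftrt X 31))                -> (ashiftrt X 31)
     (truncate:SI (sign_extend:DI X:SI))  -> X  */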
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
1075 if (code
== VEC_DUPLICATE
)
1077 gcc_assert (VECTOR_MODE_P (mode
));
1078 if (GET_MODE (op
) != VOIDmode
)
1080 if (!VECTOR_MODE_P (GET_MODE (op
)))
1081 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1083 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1086 if (CONST_INT_P (op
) || GET_CODE (op
) == CONST_DOUBLE
1087 || GET_CODE (op
) == CONST_VECTOR
)
1089 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1090 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1091 rtvec v
= rtvec_alloc (n_elts
);
1094 if (GET_CODE (op
) != CONST_VECTOR
)
1095 for (i
= 0; i
< n_elts
; i
++)
1096 RTVEC_ELT (v
, i
) = op
;
1099 enum machine_mode inmode
= GET_MODE (op
);
1100 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1101 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1103 gcc_assert (in_n_elts
< n_elts
);
1104 gcc_assert ((n_elts
% in_n_elts
) == 0);
1105 for (i
= 0; i
< n_elts
; i
++)
1106 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1108 return gen_rtx_CONST_VECTOR (mode
, v
);
1112 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1114 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1115 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1116 enum machine_mode opmode
= GET_MODE (op
);
1117 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1118 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1119 rtvec v
= rtvec_alloc (n_elts
);
1122 gcc_assert (op_n_elts
== n_elts
);
1123 for (i
= 0; i
< n_elts
; i
++)
1125 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1126 CONST_VECTOR_ELT (op
, i
),
1127 GET_MODE_INNER (opmode
));
1130 RTVEC_ELT (v
, i
) = x
;
1132 return gen_rtx_CONST_VECTOR (mode
, v
);
1135 /* The order of these tests is critical so that, for example, we don't
1136 check the wrong mode (input vs. output) for a conversion operation,
1137 such as FIX. At some point, this should be simplified. */
1139 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
1140 && (GET_CODE (op
) == CONST_DOUBLE
|| CONST_INT_P (op
)))
1142 HOST_WIDE_INT hv
, lv
;
1145 if (CONST_INT_P (op
))
1146 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1148 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1150 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1151 d
= real_value_truncate (mode
, d
);
1152 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1154 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
1155 && (GET_CODE (op
) == CONST_DOUBLE
1156 || CONST_INT_P (op
)))
1158 HOST_WIDE_INT hv
, lv
;
1161 if (CONST_INT_P (op
))
1162 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1164 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1166 if (op_mode
== VOIDmode
)
1168 /* We don't know how to interpret negative-looking numbers in
1169 this case, so don't try to fold those. */
1173 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
1176 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1178 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1179 d
= real_value_truncate (mode
, d
);
1180 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1183 if (CONST_INT_P (op
)
1184 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1186 HOST_WIDE_INT arg0
= INTVAL (op
);
1200 val
= (arg0
>= 0 ? arg0
: - arg0
);
1204 /* Don't use ffs here. Instead, get low order bit and then its
1205 number. If arg0 is zero, this will return 0, as desired. */
1206 arg0
&= GET_MODE_MASK (mode
);
1207 val
= exact_log2 (arg0
& (- arg0
)) + 1;
1211 arg0
&= GET_MODE_MASK (mode
);
1212 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1215 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
1219 arg0
&= GET_MODE_MASK (mode
);
1222 /* Even if the value at zero is undefined, we have to come
1223 up with some replacement. Seems good enough. */
1224 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1225 val
= GET_MODE_BITSIZE (mode
);
1228 val
= exact_log2 (arg0
& -arg0
);
1232 arg0
&= GET_MODE_MASK (mode
);
1235 val
++, arg0
&= arg0
- 1;
1239 arg0
&= GET_MODE_MASK (mode
);
1242 val
++, arg0
&= arg0
- 1;
1251 for (s
= 0; s
< width
; s
+= 8)
1253 unsigned int d
= width
- s
- 8;
1254 unsigned HOST_WIDE_INT byte
;
1255 byte
= (arg0
>> s
) & 0xff;
1266 /* When zero-extending a CONST_INT, we need to know its
1268 gcc_assert (op_mode
!= VOIDmode
);
1269 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1271 /* If we were really extending the mode,
1272 we would have to distinguish between zero-extension
1273 and sign-extension. */
1274 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1277 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1278 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1284 if (op_mode
== VOIDmode
)
1286 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1288 /* If we were really extending the mode,
1289 we would have to distinguish between zero-extension
1290 and sign-extension. */
1291 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1294 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1297 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1299 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
1300 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1308 case FLOAT_TRUNCATE
:
1320 return gen_int_mode (val
, mode
);
1323 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1324 for a DImode operation on a CONST_INT. */
1325 else if (GET_MODE (op
) == VOIDmode
1326 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1327 && (GET_CODE (op
) == CONST_DOUBLE
1328 || CONST_INT_P (op
)))
1330 unsigned HOST_WIDE_INT l1
, lv
;
1331 HOST_WIDE_INT h1
, hv
;
1333 if (GET_CODE (op
) == CONST_DOUBLE
)
1334 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1336 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1346 neg_double (l1
, h1
, &lv
, &hv
);
1351 neg_double (l1
, h1
, &lv
, &hv
);
1363 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
1366 lv
= exact_log2 (l1
& -l1
) + 1;
1372 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
1373 - HOST_BITS_PER_WIDE_INT
;
1375 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
1376 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1377 lv
= GET_MODE_BITSIZE (mode
);
1383 lv
= exact_log2 (l1
& -l1
);
1385 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
1386 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1387 lv
= GET_MODE_BITSIZE (mode
);
1415 for (s
= 0; s
< width
; s
+= 8)
1417 unsigned int d
= width
- s
- 8;
1418 unsigned HOST_WIDE_INT byte
;
1420 if (s
< HOST_BITS_PER_WIDE_INT
)
1421 byte
= (l1
>> s
) & 0xff;
1423 byte
= (h1
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1425 if (d
< HOST_BITS_PER_WIDE_INT
)
1428 hv
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1434 /* This is just a change-of-mode, so do nothing. */
1439 gcc_assert (op_mode
!= VOIDmode
);
1441 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1445 lv
= l1
& GET_MODE_MASK (op_mode
);
1449 if (op_mode
== VOIDmode
1450 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1454 lv
= l1
& GET_MODE_MASK (op_mode
);
1455 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
1456 && (lv
& ((HOST_WIDE_INT
) 1
1457 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
1458 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1460 hv
= HWI_SIGN_EXTEND (lv
);
1471 return immed_double_const (lv
, hv
, mode
);
1474 else if (GET_CODE (op
) == CONST_DOUBLE
1475 && SCALAR_FLOAT_MODE_P (mode
))
1477 REAL_VALUE_TYPE d
, t
;
1478 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1483 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1485 real_sqrt (&t
, mode
, &d
);
1489 d
= REAL_VALUE_ABS (d
);
1492 d
= REAL_VALUE_NEGATE (d
);
1494 case FLOAT_TRUNCATE
:
1495 d
= real_value_truncate (mode
, d
);
1498 /* All this does is change the mode. */
1501 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1508 real_to_target (tmp
, &d
, GET_MODE (op
));
1509 for (i
= 0; i
< 4; i
++)
1511 real_from_target (&d
, tmp
, mode
);
1517 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1520 else if (GET_CODE (op
) == CONST_DOUBLE
1521 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1522 && GET_MODE_CLASS (mode
) == MODE_INT
1523 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1525 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1526 operators are intentionally left unspecified (to ease implementation
1527 by target backends), for consistency, this routine implements the
1528 same semantics for constant folding as used by the middle-end. */
1530 /* This was formerly used only for non-IEEE float.
1531 eggert@twinsun.com says it is safe for IEEE also. */
1532 HOST_WIDE_INT xh
, xl
, th
, tl
;
1533 REAL_VALUE_TYPE x
, t
;
1534 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1538 if (REAL_VALUE_ISNAN (x
))
1541 /* Test against the signed upper bound. */
1542 if (width
> HOST_BITS_PER_WIDE_INT
)
1544 th
= ((unsigned HOST_WIDE_INT
) 1
1545 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1551 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1553 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1554 if (REAL_VALUES_LESS (t
, x
))
1561 /* Test against the signed lower bound. */
1562 if (width
> HOST_BITS_PER_WIDE_INT
)
1564 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1570 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1572 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1573 if (REAL_VALUES_LESS (x
, t
))
1579 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1583 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1586 /* Test against the unsigned upper bound. */
1587 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1592 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1594 th
= ((unsigned HOST_WIDE_INT
) 1
1595 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1601 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1603 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1604 if (REAL_VALUES_LESS (t
, x
))
1611 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1617 return immed_double_const (xl
, xh
, mode
);
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
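/* Editor's note (illustrative): simplify_binary_operation first canonicalizes
   operand order, then tries constant folding, then the algebraic rules, e.g.

     simplify_binary_operation (PLUS, SImode, GEN_INT (2), reg)

   swaps the constant into the second position and, absent a constant fold,
   falls through to simplify_binary_operation_1, which looks for patterns
   such as (plus X 0) below.  */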
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
1732 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1733 when x is NaN, infinite, or finite and nonzero. They aren't
1734 when x is -0 and the rounding mode is not towards -infinity,
1735 since (-0) + 0 is then 0. */
1736 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1739 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1740 transformations are safe even for IEEE. */
1741 if (GET_CODE (op0
) == NEG
)
1742 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1743 else if (GET_CODE (op1
) == NEG
)
1744 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1746 /* (~a) + 1 -> -a */
1747 if (INTEGRAL_MODE_P (mode
)
1748 && GET_CODE (op0
) == NOT
1749 && trueop1
== const1_rtx
)
1750 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1752 /* Handle both-operands-constant cases. We can only add
1753 CONST_INTs to constants since the sum of relocatable symbols
1754 can't be handled by most assemblers. Don't add CONST_INT
1755 to CONST_INT since overflow won't be computed properly if wider
1756 than HOST_BITS_PER_WIDE_INT. */
1758 if ((GET_CODE (op0
) == CONST
1759 || GET_CODE (op0
) == SYMBOL_REF
1760 || GET_CODE (op0
) == LABEL_REF
)
1761 && CONST_INT_P (op1
))
1762 return plus_constant (op0
, INTVAL (op1
));
1763 else if ((GET_CODE (op1
) == CONST
1764 || GET_CODE (op1
) == SYMBOL_REF
1765 || GET_CODE (op1
) == LABEL_REF
)
1766 && CONST_INT_P (op0
))
1767 return plus_constant (op1
, INTVAL (op0
));
1769 /* See if this is something like X * C - X or vice versa or
1770 if the multiplication is written as a shift. If so, we can
1771 distribute and make a new multiply, shift, or maybe just
1772 have X (if C is 2 in the example above). But don't make
1773 something more expensive than we had before. */
1775 if (SCALAR_INT_MODE_P (mode
))
1777 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1778 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1779 rtx lhs
= op0
, rhs
= op1
;
1781 if (GET_CODE (lhs
) == NEG
)
1785 lhs
= XEXP (lhs
, 0);
1787 else if (GET_CODE (lhs
) == MULT
1788 && CONST_INT_P (XEXP (lhs
, 1)))
1790 coeff0l
= INTVAL (XEXP (lhs
, 1));
1791 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1792 lhs
= XEXP (lhs
, 0);
1794 else if (GET_CODE (lhs
) == ASHIFT
1795 && CONST_INT_P (XEXP (lhs
, 1))
1796 && INTVAL (XEXP (lhs
, 1)) >= 0
1797 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1799 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1801 lhs
= XEXP (lhs
, 0);
1804 if (GET_CODE (rhs
) == NEG
)
1808 rhs
= XEXP (rhs
, 0);
1810 else if (GET_CODE (rhs
) == MULT
1811 && CONST_INT_P (XEXP (rhs
, 1)))
1813 coeff1l
= INTVAL (XEXP (rhs
, 1));
1814 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1815 rhs
= XEXP (rhs
, 0);
1817 else if (GET_CODE (rhs
) == ASHIFT
1818 && CONST_INT_P (XEXP (rhs
, 1))
1819 && INTVAL (XEXP (rhs
, 1)) >= 0
1820 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1822 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1824 rhs
= XEXP (rhs
, 0);
1827 if (rtx_equal_p (lhs
, rhs
))
1829 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1831 unsigned HOST_WIDE_INT l
;
1833 bool speed
= optimize_function_for_speed_p (cfun
);
1835 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1836 coeff
= immed_double_const (l
, h
, mode
);
1838 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1839 return rtx_cost (tem
, SET
, speed
) <= rtx_cost (orig
, SET
, speed
)
1844 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1845 if ((CONST_INT_P (op1
)
1846 || GET_CODE (op1
) == CONST_DOUBLE
)
1847 && GET_CODE (op0
) == XOR
1848 && (CONST_INT_P (XEXP (op0
, 1))
1849 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1850 && mode_signbit_p (mode
, op1
))
1851 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1852 simplify_gen_binary (XOR
, mode
, op1
,
1855 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1856 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1857 && GET_CODE (op0
) == MULT
1858 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1862 in1
= XEXP (XEXP (op0
, 0), 0);
1863 in2
= XEXP (op0
, 1);
1864 return simplify_gen_binary (MINUS
, mode
, op1
,
1865 simplify_gen_binary (MULT
, mode
,
1869 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1870 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1872 if (COMPARISON_P (op0
)
1873 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1874 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1875 && (reversed
= reversed_comparison (op0
, mode
)))
1877 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1879 /* If one of the operands is a PLUS or a MINUS, see if we can
1880 simplify this by the associative law.
1881 Don't use the associative law for floating point.
1882 The inaccuracy makes it nonassociative,
1883 and subtle programs can break if operations are associated. */
1885 if (INTEGRAL_MODE_P (mode
)
1886 && (plus_minus_operand_p (op0
)
1887 || plus_minus_operand_p (op1
))
1888 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1891 /* Reassociate floating point addition only when the user
1892 specifies associative math operations. */
1893 if (FLOAT_MODE_P (mode
)
1894 && flag_associative_math
)
1896 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1903 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1904 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1905 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1906 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1908 rtx xop00
= XEXP (op0
, 0);
1909 rtx xop10
= XEXP (op1
, 0);
1912 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1914 if (REG_P (xop00
) && REG_P (xop10
)
1915 && GET_MODE (xop00
) == GET_MODE (xop10
)
1916 && REGNO (xop00
) == REGNO (xop10
)
1917 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1918 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1925 /* We can't assume x-x is 0 even with non-IEEE floating point,
1926 but since it is zero except in very strange circumstances, we
1927 will treat it as zero with -ffinite-math-only. */
1928 if (rtx_equal_p (trueop0
, trueop1
)
1929 && ! side_effects_p (op0
)
1930 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
1931 return CONST0_RTX (mode
);
1933 /* Change subtraction from zero into negation. (0 - x) is the
1934 same as -x when x is NaN, infinite, or finite and nonzero.
1935 But if the mode has signed zeros, and does not round towards
1936 -infinity, then 0 - 0 is 0, not -0. */
1937 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1938 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1940 /* (-1 - a) is ~a. */
1941 if (trueop0
== constm1_rtx
)
1942 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1944 /* Subtracting 0 has no effect unless the mode has signed zeros
1945 and supports rounding towards -infinity. In such a case,
1947 if (!(HONOR_SIGNED_ZEROS (mode
)
1948 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1949 && trueop1
== CONST0_RTX (mode
))
1952 /* See if this is something like X * C - X or vice versa or
1953 if the multiplication is written as a shift. If so, we can
1954 distribute and make a new multiply, shift, or maybe just
1955 have X (if C is 2 in the example above). But don't make
1956 something more expensive than we had before. */
1958 if (SCALAR_INT_MODE_P (mode
))
1960 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1961 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1962 rtx lhs
= op0
, rhs
= op1
;
1964 if (GET_CODE (lhs
) == NEG
)
1968 lhs
= XEXP (lhs
, 0);
1970 else if (GET_CODE (lhs
) == MULT
1971 && CONST_INT_P (XEXP (lhs
, 1)))
1973 coeff0l
= INTVAL (XEXP (lhs
, 1));
1974 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1975 lhs
= XEXP (lhs
, 0);
1977 else if (GET_CODE (lhs
) == ASHIFT
1978 && CONST_INT_P (XEXP (lhs
, 1))
1979 && INTVAL (XEXP (lhs
, 1)) >= 0
1980 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1982 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1984 lhs
= XEXP (lhs
, 0);
1987 if (GET_CODE (rhs
) == NEG
)
1991 rhs
= XEXP (rhs
, 0);
1993 else if (GET_CODE (rhs
) == MULT
1994 && CONST_INT_P (XEXP (rhs
, 1)))
1996 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1997 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1998 rhs
= XEXP (rhs
, 0);
2000 else if (GET_CODE (rhs
) == ASHIFT
2001 && CONST_INT_P (XEXP (rhs
, 1))
2002 && INTVAL (XEXP (rhs
, 1)) >= 0
2003 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2005 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
2007 rhs
= XEXP (rhs
, 0);
2010 if (rtx_equal_p (lhs
, rhs
))
2012 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2014 unsigned HOST_WIDE_INT l
;
2016 bool speed
= optimize_function_for_speed_p (cfun
);
2018 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
2019 coeff
= immed_double_const (l
, h
, mode
);
2021 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2022 return rtx_cost (tem
, SET
, speed
) <= rtx_cost (orig
, SET
, speed
)
2027 /* (a - (-b)) -> (a + b). True even for IEEE. */
2028 if (GET_CODE (op1
) == NEG
)
2029 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2031 /* (-x - c) may be simplified as (-c - x). */
2032 if (GET_CODE (op0
) == NEG
2033 && (CONST_INT_P (op1
)
2034 || GET_CODE (op1
) == CONST_DOUBLE
))
2036 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2038 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2041 /* Don't let a relocatable value get a negative coeff. */
2042 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2043 return simplify_gen_binary (PLUS
, mode
,
2045 neg_const_int (mode
, op1
));
2047 /* (x - (x & y)) -> (x & ~y) */
2048 if (GET_CODE (op1
) == AND
)
2050 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2052 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2053 GET_MODE (XEXP (op1
, 1)));
2054 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2056 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2058 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2059 GET_MODE (XEXP (op1
, 0)));
2060 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2064 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2065 by reversing the comparison code if valid. */
2066 if (STORE_FLAG_VALUE
== 1
2067 && trueop0
== const1_rtx
2068 && COMPARISON_P (op1
)
2069 && (reversed
= reversed_comparison (op1
, mode
)))
2072 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2073 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2074 && GET_CODE (op1
) == MULT
2075 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2079 in1
= XEXP (XEXP (op1
, 0), 0);
2080 in2
= XEXP (op1
, 1);
2081 return simplify_gen_binary (PLUS
, mode
,
2082 simplify_gen_binary (MULT
, mode
,
2087 /* Canonicalize (minus (neg A) (mult B C)) to
2088 (minus (mult (neg B) C) A). */
2089 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2090 && GET_CODE (op1
) == MULT
2091 && GET_CODE (op0
) == NEG
)
2095 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2096 in2
= XEXP (op1
, 1);
2097 return simplify_gen_binary (MINUS
, mode
,
2098 simplify_gen_binary (MULT
, mode
,
2103 /* If one of the operands is a PLUS or a MINUS, see if we can
2104 simplify this by the associative law. This will, for example,
2105 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2106 Don't use the associative law for floating point.
2107 The inaccuracy makes it nonassociative,
2108 and subtle programs can break if operations are associated. */
2110 if (INTEGRAL_MODE_P (mode
)
2111 && (plus_minus_operand_p (op0
)
2112 || plus_minus_operand_p (op1
))
2113 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2118 if (trueop1
== constm1_rtx
)
2119 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2121 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2122 x is NaN, since x * 0 is then also NaN. Nor is it valid
2123 when the mode has signed zeros, since multiplying a negative
2124 number by 0 will give -0, not 0. */
2125 if (!HONOR_NANS (mode
)
2126 && !HONOR_SIGNED_ZEROS (mode
)
2127 && trueop1
== CONST0_RTX (mode
)
2128 && ! side_effects_p (op0
))
2131 /* In IEEE floating point, x*1 is not equivalent to x for
2133 if (!HONOR_SNANS (mode
)
2134 && trueop1
== CONST1_RTX (mode
))
2137 /* Convert multiply by constant power of two into shift unless
2138 we are still generating RTL. This test is a kludge. */
2139 if (CONST_INT_P (trueop1
)
2140 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
2141 /* If the mode is larger than the host word size, and the
2142 uppermost bit is set, then this isn't a power of two due
2143 to implicit sign extension. */
2144 && (width
<= HOST_BITS_PER_WIDE_INT
2145 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
2146 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2148 /* Likewise for multipliers wider than a word. */
2149 if (GET_CODE (trueop1
) == CONST_DOUBLE
2150 && (GET_MODE (trueop1
) == VOIDmode
2151 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
2152 && GET_MODE (op0
) == mode
2153 && CONST_DOUBLE_LOW (trueop1
) == 0
2154 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
2155 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2156 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2158 /* x*2 is x+x and x*(-1) is -x */
2159 if (GET_CODE (trueop1
) == CONST_DOUBLE
2160 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2161 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2162 && GET_MODE (op0
) == mode
)
2165 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2167 if (REAL_VALUES_EQUAL (d
, dconst2
))
2168 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2170 if (!HONOR_SNANS (mode
)
2171 && REAL_VALUES_EQUAL (d
, dconstm1
))
2172 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case IOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;
      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
	return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
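      /* For instance, with C1 == 6 and C2 == 3 the last rule above turns
	 (ior (and X (const_int 6)) (const_int 3)) into
	 (ior (and X (const_int 4)) (const_int 3)).  */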
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
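      /* For instance, in SImode (ior (ashift A (const_int 8))
	 (lshiftrt A (const_int 24))) is recognized above and rewritten as
	 (rotate A (const_int 8)).  */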
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
				      (AND, mode, XEXP (op0, 0),
				       GEN_INT (INTVAL (XEXP (op0, 1))
						& ~INTVAL (op1))),
				    op1);
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
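      /* For instance, if nonzero_bits shows that OP0 can only set the low
	 byte and OP1 only the upper bytes, the XOR above is rewritten as
	 (ior OP0 OP1).  */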
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & INTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}
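      /* For instance, (and:SI (sign_extend:SI X:QI) (const_int 0x7f))
	 becomes (zero_extend:SI (and:QI X (const_int 0x7f))), since 0x7f
	 has no bits outside QImode.  */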
      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}
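      /* For instance, (and (ior X (const_int 0x30)) (const_int 0xf0))
	 becomes (ior (and X (const_int 0xf0)) (const_int 0x30)).  */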
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);
      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ~INTVAL (trueop1)
	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
			 == INTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
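      /* For instance, (udiv X (const_int 16)) above becomes
	 (lshiftrt X (const_int 4)).  */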
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode))
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (INTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
      break;
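      /* For instance, (umod X (const_int 8)) above becomes
	 (and X (const_int 7)).  */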
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
2754 if (trueop1
== CONST0_RTX (mode
))
2756 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2758 /* Rotating ~0 always results in ~0. */
2759 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
2760 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2761 && ! side_effects_p (op1
))
2764 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
2766 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
2767 if (val
!= INTVAL (op1
))
2768 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
2775 if (trueop1
== CONST0_RTX (mode
))
2777 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2779 goto canonicalize_shift
;
2782 if (trueop1
== CONST0_RTX (mode
))
2784 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_BITSIZE (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
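      /* For instance, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields the
	 mode bitsize (32 for SImode), (lshiftrt (clz:SI X) (const_int 5)) is
	 1 exactly when X is zero, so it is rewritten above as
	 (eq X (const_int 0)).  */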
2804 if (width
<= HOST_BITS_PER_WIDE_INT
2805 && CONST_INT_P (trueop1
)
2806 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2807 && ! side_effects_p (op0
))
2809 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2811 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2817 if (width
<= HOST_BITS_PER_WIDE_INT
2818 && CONST_INT_P (trueop1
)
2819 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2820 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2821 && ! side_effects_p (op0
))
2823 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2825 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2831 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2833 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2835 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2841 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2843 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2845 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2858 /* ??? There are simplifications that can be done. */
2862 if (!VECTOR_MODE_P (mode
))
2864 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2865 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2866 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2867 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2868 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
2870 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2871 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2874 /* Extract a scalar element from a nested VEC_SELECT expression
2875 (with optional nested VEC_CONCAT expression). Some targets
2876 (i386) extract scalar element from a vector using chain of
2877 nested VEC_SELECT expressions. When input operand is a memory
2878 operand, this operation can be simplified to a simple scalar
2879 load from an offseted memory address. */
2880 if (GET_CODE (trueop0
) == VEC_SELECT
)
2882 rtx op0
= XEXP (trueop0
, 0);
2883 rtx op1
= XEXP (trueop0
, 1);
2885 enum machine_mode opmode
= GET_MODE (op0
);
2886 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
2887 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
2889 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
2895 gcc_assert (GET_CODE (op1
) == PARALLEL
);
2896 gcc_assert (i
< n_elts
);
2898 /* Select element, pointed by nested selector. */
2899 elem
= INTVAL (XVECEXP (op1
, 0, i
));
2901 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2902 if (GET_CODE (op0
) == VEC_CONCAT
)
2904 rtx op00
= XEXP (op0
, 0);
2905 rtx op01
= XEXP (op0
, 1);
2907 enum machine_mode mode00
, mode01
;
2908 int n_elts00
, n_elts01
;
2910 mode00
= GET_MODE (op00
);
2911 mode01
= GET_MODE (op01
);
2913 /* Find out number of elements of each operand. */
2914 if (VECTOR_MODE_P (mode00
))
2916 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
2917 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
2922 if (VECTOR_MODE_P (mode01
))
2924 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
2925 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
2930 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
2932 /* Select correct operand of VEC_CONCAT
2933 and adjust selector. */
2934 if (elem
< n_elts01
)
2945 vec
= rtvec_alloc (1);
2946 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
2948 tmp
= gen_rtx_fmt_ee (code
, mode
,
2949 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
2952 if (GET_CODE (trueop0
) == VEC_DUPLICATE
2953 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
2954 return XEXP (trueop0
, 0);
2958 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2959 gcc_assert (GET_MODE_INNER (mode
)
2960 == GET_MODE_INNER (GET_MODE (trueop0
)));
2961 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2963 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2965 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2966 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2967 rtvec v
= rtvec_alloc (n_elts
);
2970 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2971 for (i
= 0; i
< n_elts
; i
++)
2973 rtx x
= XVECEXP (trueop1
, 0, i
);
2975 gcc_assert (CONST_INT_P (x
));
2976 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2980 return gen_rtx_CONST_VECTOR (mode
, v
);
2984 if (XVECLEN (trueop1
, 0) == 1
2985 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
2986 && GET_CODE (trueop0
) == VEC_CONCAT
)
2989 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2991 /* Try to find the element in the VEC_CONCAT. */
2992 while (GET_MODE (vec
) != mode
2993 && GET_CODE (vec
) == VEC_CONCAT
)
2995 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2996 if (offset
< vec_size
)
2997 vec
= XEXP (vec
, 0);
3001 vec
= XEXP (vec
, 1);
3003 vec
= avoid_constant_pool_reference (vec
);
3006 if (GET_MODE (vec
) == mode
)
3013 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3014 ? GET_MODE (trueop0
)
3015 : GET_MODE_INNER (mode
));
3016 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3017 ? GET_MODE (trueop1
)
3018 : GET_MODE_INNER (mode
));
3020 gcc_assert (VECTOR_MODE_P (mode
));
3021 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3022 == GET_MODE_SIZE (mode
));
3024 if (VECTOR_MODE_P (op0_mode
))
3025 gcc_assert (GET_MODE_INNER (mode
)
3026 == GET_MODE_INNER (op0_mode
));
3028 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3030 if (VECTOR_MODE_P (op1_mode
))
3031 gcc_assert (GET_MODE_INNER (mode
)
3032 == GET_MODE_INNER (op1_mode
));
3034 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3036 if ((GET_CODE (trueop0
) == CONST_VECTOR
3037 || CONST_INT_P (trueop0
)
3038 || GET_CODE (trueop0
) == CONST_DOUBLE
)
3039 && (GET_CODE (trueop1
) == CONST_VECTOR
3040 || CONST_INT_P (trueop1
)
3041 || GET_CODE (trueop1
) == CONST_DOUBLE
))
3043 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3044 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3045 rtvec v
= rtvec_alloc (n_elts
);
3047 unsigned in_n_elts
= 1;
3049 if (VECTOR_MODE_P (op0_mode
))
3050 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3051 for (i
= 0; i
< n_elts
; i
++)
3055 if (!VECTOR_MODE_P (op0_mode
))
3056 RTVEC_ELT (v
, i
) = trueop0
;
3058 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3062 if (!VECTOR_MODE_P (op1_mode
))
3063 RTVEC_ELT (v
, i
) = trueop1
;
3065 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3070 return gen_rtx_CONST_VECTOR (mode
, v
);
3083 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3086 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3088 unsigned int width
= GET_MODE_BITSIZE (mode
);
3090 if (VECTOR_MODE_P (mode
)
3091 && code
!= VEC_CONCAT
3092 && GET_CODE (op0
) == CONST_VECTOR
3093 && GET_CODE (op1
) == CONST_VECTOR
)
3095 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3096 enum machine_mode op0mode
= GET_MODE (op0
);
3097 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3098 enum machine_mode op1mode
= GET_MODE (op1
);
3099 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3100 rtvec v
= rtvec_alloc (n_elts
);
3103 gcc_assert (op0_n_elts
== n_elts
);
3104 gcc_assert (op1_n_elts
== n_elts
);
3105 for (i
= 0; i
< n_elts
; i
++)
3107 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3108 CONST_VECTOR_ELT (op0
, i
),
3109 CONST_VECTOR_ELT (op1
, i
));
3112 RTVEC_ELT (v
, i
) = x
;
3115 return gen_rtx_CONST_VECTOR (mode
, v
);
3118 if (VECTOR_MODE_P (mode
)
3119 && code
== VEC_CONCAT
3120 && (CONST_INT_P (op0
)
3121 || GET_CODE (op0
) == CONST_DOUBLE
3122 || GET_CODE (op0
) == CONST_FIXED
)
3123 && (CONST_INT_P (op1
)
3124 || GET_CODE (op1
) == CONST_DOUBLE
3125 || GET_CODE (op1
) == CONST_FIXED
))
3127 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3128 rtvec v
= rtvec_alloc (n_elts
);
3130 gcc_assert (n_elts
>= 2);
3133 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3134 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3136 RTVEC_ELT (v
, 0) = op0
;
3137 RTVEC_ELT (v
, 1) = op1
;
3141 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3142 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3145 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3146 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3147 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3149 for (i
= 0; i
< op0_n_elts
; ++i
)
3150 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3151 for (i
= 0; i
< op1_n_elts
; ++i
)
3152 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3155 return gen_rtx_CONST_VECTOR (mode
, v
);
3158 if (SCALAR_FLOAT_MODE_P (mode
)
3159 && GET_CODE (op0
) == CONST_DOUBLE
3160 && GET_CODE (op1
) == CONST_DOUBLE
3161 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3172 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3174 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3176 for (i
= 0; i
< 4; i
++)
3193 real_from_target (&r
, tmp0
, mode
);
3194 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3198 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3201 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3202 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3203 real_convert (&f0
, mode
, &f0
);
3204 real_convert (&f1
, mode
, &f1
);
3206 if (HONOR_SNANS (mode
)
3207 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3211 && REAL_VALUES_EQUAL (f1
, dconst0
)
3212 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3215 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3216 && flag_trapping_math
3217 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3219 int s0
= REAL_VALUE_NEGATIVE (f0
);
3220 int s1
= REAL_VALUE_NEGATIVE (f1
);
3225 /* Inf + -Inf = NaN plus exception. */
3230 /* Inf - Inf = NaN plus exception. */
3235 /* Inf / Inf = NaN plus exception. */
3242 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3243 && flag_trapping_math
3244 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3245 || (REAL_VALUE_ISINF (f1
)
3246 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3247 /* Inf * 0 = NaN plus exception. */
3250 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3252 real_convert (&result
, mode
, &value
);
3254 /* Don't constant fold this floating point operation if
3255 the result has overflowed and flag_trapping_math. */
3257 if (flag_trapping_math
3258 && MODE_HAS_INFINITIES (mode
)
3259 && REAL_VALUE_ISINF (result
)
3260 && !REAL_VALUE_ISINF (f0
)
3261 && !REAL_VALUE_ISINF (f1
))
3262 /* Overflow plus exception. */
3265 /* Don't constant fold this floating point operation if the
3266 result may dependent upon the run-time rounding mode and
3267 flag_rounding_math is set, or if GCC's software emulation
3268 is unable to accurately represent the result. */
3270 if ((flag_rounding_math
3271 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3272 && (inexact
|| !real_identical (&result
, &value
)))
3275 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3279 /* We can fold some multi-word operations. */
3280 if (GET_MODE_CLASS (mode
) == MODE_INT
3281 && width
== HOST_BITS_PER_WIDE_INT
* 2
3282 && (GET_CODE (op0
) == CONST_DOUBLE
|| CONST_INT_P (op0
))
3283 && (GET_CODE (op1
) == CONST_DOUBLE
|| CONST_INT_P (op1
)))
3285 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
3286 HOST_WIDE_INT h1
, h2
, hv
, ht
;
3288 if (GET_CODE (op0
) == CONST_DOUBLE
)
3289 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
3291 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
3293 if (GET_CODE (op1
) == CONST_DOUBLE
)
3294 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
3296 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
3301 /* A - B == A + (-B). */
3302 neg_double (l2
, h2
, &lv
, &hv
);
3305 /* Fall through.... */
3308 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3312 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3316 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3317 &lv
, &hv
, <
, &ht
))
3322 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3323 <
, &ht
, &lv
, &hv
))
3328 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3329 &lv
, &hv
, <
, &ht
))
3334 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3335 <
, &ht
, &lv
, &hv
))
3340 lv
= l1
& l2
, hv
= h1
& h2
;
3344 lv
= l1
| l2
, hv
= h1
| h2
;
3348 lv
= l1
^ l2
, hv
= h1
^ h2
;
3354 && ((unsigned HOST_WIDE_INT
) l1
3355 < (unsigned HOST_WIDE_INT
) l2
)))
3364 && ((unsigned HOST_WIDE_INT
) l1
3365 > (unsigned HOST_WIDE_INT
) l2
)))
3372 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
3374 && ((unsigned HOST_WIDE_INT
) l1
3375 < (unsigned HOST_WIDE_INT
) l2
)))
3382 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
3384 && ((unsigned HOST_WIDE_INT
) l1
3385 > (unsigned HOST_WIDE_INT
) l2
)))
3391 case LSHIFTRT
: case ASHIFTRT
:
3393 case ROTATE
: case ROTATERT
:
3394 if (SHIFT_COUNT_TRUNCATED
)
3395 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
3397 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
3400 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3401 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
3403 else if (code
== ASHIFT
)
3404 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
3405 else if (code
== ROTATE
)
3406 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3407 else /* code == ROTATERT */
3408 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3415 return immed_double_const (lv
, hv
, mode
);
3418 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
3419 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3421 /* Get the integer argument values in two forms:
3422 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3424 arg0
= INTVAL (op0
);
3425 arg1
= INTVAL (op1
);
3427 if (width
< HOST_BITS_PER_WIDE_INT
)
3429 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3430 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3433 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3434 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3437 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3438 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3446 /* Compute the value of the arithmetic. */
3451 val
= arg0s
+ arg1s
;
3455 val
= arg0s
- arg1s
;
3459 val
= arg0s
* arg1s
;
3464 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3467 val
= arg0s
/ arg1s
;
3472 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3475 val
= arg0s
% arg1s
;
3480 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3483 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3488 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3491 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3509 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3510 the value is in range. We can't return any old value for
3511 out-of-range arguments because either the middle-end (via
3512 shift_truncation_mask) or the back-end might be relying on
3513 target-specific knowledge. Nor can we rely on
3514 shift_truncation_mask, since the shift might not be part of an
3515 ashlM3, lshrM3 or ashrM3 instruction. */
3516 if (SHIFT_COUNT_TRUNCATED
)
3517 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3518 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3521 val
= (code
== ASHIFT
3522 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3523 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3525 /* Sign-extend the result for arithmetic right shifts. */
3526 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3527 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3535 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3536 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3544 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3545 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3549 /* Do nothing here. */
3553 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3557 val
= ((unsigned HOST_WIDE_INT
) arg0
3558 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3562 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3566 val
= ((unsigned HOST_WIDE_INT
) arg0
3567 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3580 /* ??? There are simplifications that can be done. */
3587 return gen_int_mode (val
, mode
);
3595 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3598 Rather than test for specific case, we do this by a brute-force method
3599 and do all possible simplifications until no more changes occur. Then
3600 we rebuild the operation. */
3602 struct simplify_plus_minus_op_data
3609 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
3613 result
= (commutative_operand_precedence (y
)
3614 - commutative_operand_precedence (x
));
3618 /* Group together equal REGs to do more simplification. */
3619 if (REG_P (x
) && REG_P (y
))
3620 return REGNO (x
) > REGNO (y
);
3626 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3629 struct simplify_plus_minus_op_data ops
[8];
3631 int n_ops
= 2, input_ops
= 2;
3632 int changed
, n_constants
= 0, canonicalized
= 0;
3635 memset (ops
, 0, sizeof ops
);
3637 /* Set up the two operands and then expand them until nothing has been
3638 changed. If we run out of room in our array, give up; this should
3639 almost never happen. */
3644 ops
[1].neg
= (code
== MINUS
);
3650 for (i
= 0; i
< n_ops
; i
++)
3652 rtx this_op
= ops
[i
].op
;
3653 int this_neg
= ops
[i
].neg
;
3654 enum rtx_code this_code
= GET_CODE (this_op
);
3663 ops
[n_ops
].op
= XEXP (this_op
, 1);
3664 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3667 ops
[i
].op
= XEXP (this_op
, 0);
3670 canonicalized
|= this_neg
;
3674 ops
[i
].op
= XEXP (this_op
, 0);
3675 ops
[i
].neg
= ! this_neg
;
3682 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3683 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3684 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3686 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3687 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3688 ops
[n_ops
].neg
= this_neg
;
3696 /* ~a -> (-a - 1) */
3699 ops
[n_ops
].op
= constm1_rtx
;
3700 ops
[n_ops
++].neg
= this_neg
;
3701 ops
[i
].op
= XEXP (this_op
, 0);
3702 ops
[i
].neg
= !this_neg
;
3712 ops
[i
].op
= neg_const_int (mode
, this_op
);
3726 if (n_constants
> 1)
3729 gcc_assert (n_ops
>= 2);
3731 /* If we only have two operands, we can avoid the loops. */
3734 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3737 /* Get the two operands. Be careful with the order, especially for
3738 the cases where code == MINUS. */
3739 if (ops
[0].neg
&& ops
[1].neg
)
3741 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3744 else if (ops
[0].neg
)
3755 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3758 /* Now simplify each pair of operands until nothing changes. */
3761 /* Insertion sort is good enough for an eight-element array. */
3762 for (i
= 1; i
< n_ops
; i
++)
3764 struct simplify_plus_minus_op_data save
;
3766 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
3772 ops
[j
+ 1] = ops
[j
];
3773 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
3778 for (i
= n_ops
- 1; i
> 0; i
--)
3779 for (j
= i
- 1; j
>= 0; j
--)
3781 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3782 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3784 if (lhs
!= 0 && rhs
!= 0)
3786 enum rtx_code ncode
= PLUS
;
3792 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3794 else if (swap_commutative_operands_p (lhs
, rhs
))
3795 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3797 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
3798 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
3800 rtx tem_lhs
, tem_rhs
;
3802 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3803 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3804 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3806 if (tem
&& !CONSTANT_P (tem
))
3807 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3810 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3812 /* Reject "simplifications" that just wrap the two
3813 arguments in a CONST. Failure to do so can result
3814 in infinite recursion with simplify_binary_operation
3815 when it calls us to simplify CONST operations. */
3817 && ! (GET_CODE (tem
) == CONST
3818 && GET_CODE (XEXP (tem
, 0)) == ncode
3819 && XEXP (XEXP (tem
, 0), 0) == lhs
3820 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3823 if (GET_CODE (tem
) == NEG
)
3824 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3825 if (CONST_INT_P (tem
) && lneg
)
3826 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3830 ops
[j
].op
= NULL_RTX
;
3837 /* If nothing changed, fail. */
3841 /* Pack all the operands to the lower-numbered entries. */
3842 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3852 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3854 && CONST_INT_P (ops
[1].op
)
3855 && CONSTANT_P (ops
[0].op
)
3857 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3859 /* We suppressed creation of trivial CONST expressions in the
3860 combination loop to avoid recursion. Create one manually now.
3861 The combination loop should have ensured that there is exactly
3862 one CONST_INT, and the sort will have ensured that it is last
3863 in the array and that any other constant will be next-to-last. */
3866 && CONST_INT_P (ops
[n_ops
- 1].op
)
3867 && CONSTANT_P (ops
[n_ops
- 2].op
))
3869 rtx value
= ops
[n_ops
- 1].op
;
3870 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3871 value
= neg_const_int (mode
, value
);
3872 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3876 /* Put a non-negated operand first, if possible. */
3878 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3881 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3890 /* Now make the result by performing the requested operations. */
3892 for (i
= 1; i
< n_ops
; i
++)
3893 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3894 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
3920 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3921 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3923 rtx tem
, trueop0
, trueop1
;
3925 if (cmp_mode
== VOIDmode
)
3926 cmp_mode
= GET_MODE (op0
);
3927 if (cmp_mode
== VOIDmode
)
3928 cmp_mode
= GET_MODE (op1
);
3930 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3933 if (SCALAR_FLOAT_MODE_P (mode
))
3935 if (tem
== const0_rtx
)
3936 return CONST0_RTX (mode
);
3937 #ifdef FLOAT_STORE_FLAG_VALUE
3939 REAL_VALUE_TYPE val
;
3940 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3941 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3947 if (VECTOR_MODE_P (mode
))
3949 if (tem
== const0_rtx
)
3950 return CONST0_RTX (mode
);
3951 #ifdef VECTOR_STORE_FLAG_VALUE
3956 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3957 if (val
== NULL_RTX
)
3959 if (val
== const1_rtx
)
3960 return CONST1_RTX (mode
);
3962 units
= GET_MODE_NUNITS (mode
);
3963 v
= rtvec_alloc (units
);
3964 for (i
= 0; i
< units
; i
++)
3965 RTVEC_ELT (v
, i
) = val
;
3966 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3976 /* For the following tests, ensure const0_rtx is op1. */
3977 if (swap_commutative_operands_p (op0
, op1
)
3978 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3979 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3981 /* If op0 is a compare, extract the comparison arguments from it. */
3982 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3983 return simplify_gen_relational (code
, mode
, VOIDmode
,
3984 XEXP (op0
, 0), XEXP (op0
, 1));
3986 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3990 trueop0
= avoid_constant_pool_reference (op0
);
3991 trueop1
= avoid_constant_pool_reference (op1
);
3992 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3996 /* This part of simplify_relational_operation is only used when CMP_MODE
3997 is not in class MODE_CC (i.e. it is a real comparison).
3999 MODE is the mode of the result, while CMP_MODE specifies in which
4000 mode the comparison is done in, so it is the mode of the operands. */
4003 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
4004 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4006 enum rtx_code op0code
= GET_CODE (op0
);
4008 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4010 /* If op0 is a comparison, extract the comparison arguments
4014 if (GET_MODE (op0
) == mode
)
4015 return simplify_rtx (op0
);
4017 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4018 XEXP (op0
, 0), XEXP (op0
, 1));
4020 else if (code
== EQ
)
4022 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4023 if (new_code
!= UNKNOWN
)
4024 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4025 XEXP (op0
, 0), XEXP (op0
, 1));
4029 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4030 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4031 if ((code
== LTU
|| code
== GEU
)
4032 && GET_CODE (op0
) == PLUS
4033 && CONST_INT_P (XEXP (op0
, 1))
4034 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4035 || rtx_equal_p (op1
, XEXP (op0
, 1))))
4038 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4039 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4040 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4043 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4044 if ((code
== LTU
|| code
== GEU
)
4045 && GET_CODE (op0
) == PLUS
4046 && rtx_equal_p (op1
, XEXP (op0
, 1))
4047 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4048 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4049 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4050 copy_rtx (XEXP (op0
, 0)));
4052 if (op1
== const0_rtx
)
4054 /* Canonicalize (GTU x 0) as (NE x 0). */
4056 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4057 /* Canonicalize (LEU x 0) as (EQ x 0). */
4059 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4061 else if (op1
== const1_rtx
)
4066 /* Canonicalize (GE x 1) as (GT x 0). */
4067 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4070 /* Canonicalize (GEU x 1) as (NE x 0). */
4071 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4074 /* Canonicalize (LT x 1) as (LE x 0). */
4075 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4078 /* Canonicalize (LTU x 1) as (EQ x 0). */
4079 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4085 else if (op1
== constm1_rtx
)
4087 /* Canonicalize (LE x -1) as (LT x 0). */
4089 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4090 /* Canonicalize (GT x -1) as (GE x 0). */
4092 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4095 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4096 if ((code
== EQ
|| code
== NE
)
4097 && (op0code
== PLUS
|| op0code
== MINUS
)
4099 && CONSTANT_P (XEXP (op0
, 1))
4100 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4102 rtx x
= XEXP (op0
, 0);
4103 rtx c
= XEXP (op0
, 1);
4105 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
4107 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
4110 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4111 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4113 && op1
== const0_rtx
4114 && GET_MODE_CLASS (mode
) == MODE_INT
4115 && cmp_mode
!= VOIDmode
4116 /* ??? Work-around BImode bugs in the ia64 backend. */
4118 && cmp_mode
!= BImode
4119 && nonzero_bits (op0
, cmp_mode
) == 1
4120 && STORE_FLAG_VALUE
== 1)
4121 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4122 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4123 : lowpart_subreg (mode
, op0
, cmp_mode
);
4125 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4126 if ((code
== EQ
|| code
== NE
)
4127 && op1
== const0_rtx
4129 return simplify_gen_relational (code
, mode
, cmp_mode
,
4130 XEXP (op0
, 0), XEXP (op0
, 1));
4132 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4133 if ((code
== EQ
|| code
== NE
)
4135 && rtx_equal_p (XEXP (op0
, 0), op1
)
4136 && !side_effects_p (XEXP (op0
, 0)))
4137 return simplify_gen_relational (code
, mode
, cmp_mode
,
4138 XEXP (op0
, 1), const0_rtx
);
4140 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4141 if ((code
== EQ
|| code
== NE
)
4143 && rtx_equal_p (XEXP (op0
, 1), op1
)
4144 && !side_effects_p (XEXP (op0
, 1)))
4145 return simplify_gen_relational (code
, mode
, cmp_mode
,
4146 XEXP (op0
, 0), const0_rtx
);
4148 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4149 if ((code
== EQ
|| code
== NE
)
4151 && (CONST_INT_P (op1
)
4152 || GET_CODE (op1
) == CONST_DOUBLE
)
4153 && (CONST_INT_P (XEXP (op0
, 1))
4154 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
4155 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4156 simplify_gen_binary (XOR
, cmp_mode
,
4157 XEXP (op0
, 1), op1
));
4159 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4165 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4166 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4167 XEXP (op0
, 0), const0_rtx
);
4172 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4173 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4174 XEXP (op0
, 0), const0_rtx
);
4193 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4194 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4195 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4196 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4197 For floating-point comparisons, assume that the operands were ordered. */
4200 comparison_result (enum rtx_code code
, int known_results
)
4206 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4209 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4213 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4216 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4220 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4223 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4226 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4228 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4231 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4233 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4236 return const_true_rtx
;
4244 /* Check if the given comparison (done in the given MODE) is actually a
4245 tautology or a contradiction.
4246 If no simplification is possible, this function returns zero.
4247 Otherwise, it returns either const_true_rtx or const0_rtx. */
4250 simplify_const_relational_operation (enum rtx_code code
,
4251 enum machine_mode mode
,
4258 gcc_assert (mode
!= VOIDmode
4259 || (GET_MODE (op0
) == VOIDmode
4260 && GET_MODE (op1
) == VOIDmode
));
4262 /* If op0 is a compare, extract the comparison arguments from it. */
4263 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4265 op1
= XEXP (op0
, 1);
4266 op0
= XEXP (op0
, 0);
4268 if (GET_MODE (op0
) != VOIDmode
)
4269 mode
= GET_MODE (op0
);
4270 else if (GET_MODE (op1
) != VOIDmode
)
4271 mode
= GET_MODE (op1
);
4276 /* We can't simplify MODE_CC values since we don't know what the
4277 actual comparison is. */
4278 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4281 /* Make sure the constant is second. */
4282 if (swap_commutative_operands_p (op0
, op1
))
4284 tem
= op0
, op0
= op1
, op1
= tem
;
4285 code
= swap_condition (code
);
4288 trueop0
= avoid_constant_pool_reference (op0
);
4289 trueop1
= avoid_constant_pool_reference (op1
);
4291 /* For integer comparisons of A and B maybe we can simplify A - B and can
4292 then simplify a comparison of that with zero. If A and B are both either
4293 a register or a CONST_INT, this can't help; testing for these cases will
4294 prevent infinite recursion here and speed things up.
4296 We can only do this for EQ and NE comparisons as otherwise we may
4297 lose or introduce overflow which we cannot disregard as undefined as
4298 we do not know the signedness of the operation on either the left or
4299 the right hand side of the comparison. */
4301 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4302 && (code
== EQ
|| code
== NE
)
4303 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
4304 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
4305 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4306 /* We cannot do this if tem is a nonzero address. */
4307 && ! nonzero_address_p (tem
))
4308 return simplify_const_relational_operation (signed_condition (code
),
4309 mode
, tem
, const0_rtx
);
4311 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4312 return const_true_rtx
;
4314 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4317 /* For modes without NaNs, if the two operands are equal, we know the
4318 result except if they have side-effects. Even with NaNs we know
4319 the result of unordered comparisons and, if signaling NaNs are
4320 irrelevant, also the result of LT/GT/LTGT. */
4321 if ((! HONOR_NANS (GET_MODE (trueop0
))
4322 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4323 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4324 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4325 && rtx_equal_p (trueop0
, trueop1
)
4326 && ! side_effects_p (trueop0
))
4327 return comparison_result (code
, CMP_EQ
);
4329 /* If the operands are floating-point constants, see if we can fold
4331 if (GET_CODE (trueop0
) == CONST_DOUBLE
4332 && GET_CODE (trueop1
) == CONST_DOUBLE
4333 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4335 REAL_VALUE_TYPE d0
, d1
;
4337 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4338 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4340 /* Comparisons are unordered iff at least one of the values is NaN. */
4341 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4351 return const_true_rtx
;
4364 return comparison_result (code
,
4365 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4366 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4369 /* Otherwise, see if the operands are both integers. */
4370 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4371 && (GET_CODE (trueop0
) == CONST_DOUBLE
4372 || CONST_INT_P (trueop0
))
4373 && (GET_CODE (trueop1
) == CONST_DOUBLE
4374 || CONST_INT_P (trueop1
)))
4376 int width
= GET_MODE_BITSIZE (mode
);
4377 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4378 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4380 /* Get the two words comprising each integer constant. */
4381 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
4383 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4384 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4388 l0u
= l0s
= INTVAL (trueop0
);
4389 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4392 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4394 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4395 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4399 l1u
= l1s
= INTVAL (trueop1
);
4400 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4403 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4404 we have to sign or zero-extend the values. */
4405 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4407 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4408 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4410 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4411 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4413 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4414 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4416 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4417 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4419 if (h0u
== h1u
&& l0u
== l1u
)
4420 return comparison_result (code
, CMP_EQ
);
4424 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4425 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4426 return comparison_result (code
, cr
);
  /* Optimize comparisons with upper and lower bounds.  */
  if (SCALAR_INT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
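  /* Worked example (added for illustration): if nonzero_bits shows that
     only the low eight bits of TRUEOP0 can be set, then mmin is 0 and
     mmax is at most 255, so (gtu:SI x (const_int 255)) folds to
     const0_rtx and (leu:SI x (const_int 255)) folds to const_true_rtx.  */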
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (INTVAL (inner_const)
				  & ((HOST_WIDE_INT) 1 << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case GE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case LE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
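  /* Illustration (added, not in the original sources): since
     (ior:SI x (const_int 4)) always has bit 2 set, comparing it against
     zero lets the EQ/LEU cases above fold to const0_rtx and the NE/GTU
     cases fold to const_true_rtx without knowing anything about X.  */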
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
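/* Illustration of the ABS folding above (added, not original): with
   -fstrict-overflow and neither -fwrapv nor -ftrapv in effect,
   (ge:SI (abs:SI (reg:SI x)) (const_int 0)) folds to const_true_rtx,
   possibly after issuing a -Wstrict-overflow warning.  */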
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications are possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
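      /* Worked example (added for illustration): on a target where
	 BITS_BIG_ENDIAN is 0,
	 (zero_extract:SI (const_int 0x1234) (const_int 8) (const_int 4))
	 shifts the constant right by 4 and masks it to 8 bits, giving
	 (const_int 0x23).  */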
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
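      /* Illustration (added, not original):
	 (if_then_else (ne (reg:SI a) (reg:SI b)) (reg:SI a) (reg:SI b))
	 simplifies to (reg:SI a) by the rule above, provided the mode
	 honors neither NaNs nor signed zeros.  */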
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
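/* Illustration of the VEC_MERGE case above (added, not original): with
   op2 = (const_int 5) and V4SImode operands, elements 0 and 2 come from
   OP0 and elements 1 and 3 from OP1; when both operands are CONST_VECTORs
   the result is built element by element as a new CONST_VECTOR.  */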
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
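/* Worked example (added for illustration): on a little-endian target,
   (subreg:QI (const_int 0x12345678) 0) unpacks the constant into the byte
   array { 0x78, 0x56, 0x34, 0x12, ... }, selects the byte at offset 0 and
   repacks it as (const_int 0x78).  */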
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
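/* Illustration of the BYTE argument (added, not original): for a DImode
   inner value, BYTE is a memory-order offset, so on a little-endian
   target (subreg:SI (reg:DI x) 0) names the low part and
   (subreg:SI (reg:DI x) 4) the high part; on a big-endian target the
   roles are reversed.  */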
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis, which cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
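/* Worked example for the word extraction above (added for illustration):
   on a 32-bit little-endian target,
   (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) becomes
   (subreg:SI (reg:DI x) 4), i.e. the high word of X, because the shift
   count is a multiple of the outer mode's size.  */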
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))