/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
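
/* Editorial note: a minimal standalone sketch (not part of GCC) of what
   HWI_SIGN_EXTEND computes, using int64_t in place of HOST_WIDE_INT.
   The high half of a (low, high) pair is all ones exactly when the low
   half, read as a signed value, is negative (assuming the usual
   two's-complement representation).  */
#if 0
#include <assert.h>
#include <stdint.h>

static int64_t
hwi_sign_extend_sketch (uint64_t low)
{
  /* Mirror of the macro: -1 if the sign bit of LOW is set, else 0.  */
  return ((int64_t) low < 0) ? (int64_t) -1 : (int64_t) 0;
}

static void
hwi_sign_extend_sketch_tests (void)
{
  assert (hwi_sign_extend_sketch (5) == 0);
  assert (hwi_sign_extend_sketch (UINT64_C (0x8000000000000000)) == -1);
}
#endif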
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
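
/* Editorial note: a standalone sketch (not part of GCC) of the signbit
   predicates above, specialized to a 32-bit mode so the mask arithmetic
   is concrete.  uint64_t stands in for unsigned HOST_WIDE_INT and 32 for
   GET_MODE_PRECISION (mode).  */
#if 0
#include <assert.h>
#include <stdint.h>

#define SKETCH_WIDTH 32
#define SKETCH_MODE_MASK ((UINT64_C (1) << SKETCH_WIDTH) - 1)
#define SKETCH_SIGNBIT (UINT64_C (1) << (SKETCH_WIDTH - 1))

/* val_signbit_p: VAL, masked to the mode, is exactly the sign bit.  */
static int
sketch_val_signbit_p (uint64_t val)
{
  return (val & SKETCH_MODE_MASK) == SKETCH_SIGNBIT;
}

/* val_signbit_known_set_p: the sign bit is set in VAL.  */
static int
sketch_val_signbit_known_set_p (uint64_t val)
{
  return (val & SKETCH_SIGNBIT) != 0;
}

/* val_signbit_known_clear_p: the sign bit is clear in VAL.  */
static int
sketch_val_signbit_known_clear_p (uint64_t val)
{
  return (val & SKETCH_SIGNBIT) == 0;
}

static void
sketch_signbit_tests (void)
{
  assert (sketch_val_signbit_p (UINT64_C (0x80000000)));
  assert (!sketch_val_signbit_p (UINT64_C (0x80000001)));
  assert (sketch_val_signbit_known_set_p (UINT64_C (0x80000001)));
  assert (sketch_val_signbit_known_clear_p (UINT64_C (0x7fffffff)));
}
#endif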
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
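
/* Editorial note: an illustrative (hypothetical) instance of the
   canonicalization above: because PLUS is RTX_COMM_ARITH, a request for
   (plus:SI (const_int 4) (reg:SI 60)) that does not fold is emitted in
   canonical order as (plus:SI (reg:SI 60) (const_int 4)); the register
   number here is made up for the example.  */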
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
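
/* Editorial note: an illustrative (hypothetical) use of the routine above:
   replacing (reg:SI 60) with (const_int 0) in

     (plus:SI (reg:SI 60) (reg:SI 61))

   via simplify_replace_rtx yields (reg:SI 61), because the PLUS is rebuilt
   through simplify_gen_binary and x + 0 folds to x.  The register numbers
   are made up for the example.  */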
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (enum machine_mode mode, rtx op,
                     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1),
                                        op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }
  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }
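
  /* Editorial note: as a concrete (hypothetical) instance of the rule
     above, on a little-endian 64-bit target
     (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32))) becomes
     (subreg:SI (reg:DI X) 4), i.e. a direct reference to the high word
     of X: byte is 0, shifted_bytes is 32 / 8 = 4.  */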
  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }
  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);
  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }
  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
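
/* Editorial note: a standalone sketch (not part of GCC) of why the
   PLUS/MINUS/MULT rule in simplify_truncation is sound: truncation is
   reduction mod 2^N, which distributes over modular addition and
   multiplication.  uint64_t/uint8_t stand in for DImode/QImode values.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
sketch_truncate_distributes (uint64_t x, uint64_t y)
{
  /* (truncate:QI (plus:DI x y)) == (plus:QI (truncate:QI x)
     (truncate:QI y)), and likewise for mult.  */
  assert ((uint8_t) (x + y) == (uint8_t) ((uint8_t) x + (uint8_t) y));
  assert ((uint8_t) (x * y) == (uint8_t) ((uint8_t) x * (uint8_t) y));
}
#endif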
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a CONST_INT).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
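
      /* Editorial note: a standalone sketch (not part of GCC) of the
         STORE_FLAG_VALUE == 1 case above: negating "x < 0" (a 0/1 flag)
         equals an arithmetic right shift by the precision minus 1, shown
         here for 32-bit values and assuming the compiler implements >>
         on negative signed values as an arithmetic shift, as typical
         targets do.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
sketch_neg_of_sign_test (int32_t x)
{
  int32_t flag = (x < 0);     /* (lt x 0) with STORE_FLAG_VALUE 1.  */
  int32_t shifted = x >> 31;  /* (ashiftrt x 31): 0 or -1.  */
  assert (-flag == shifted);
}
#endif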
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }
      break;
:
1104 if (DECIMAL_FLOAT_MODE_P (mode
))
1107 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1108 if (GET_CODE (op
) == FLOAT_EXTEND
1109 && GET_MODE (XEXP (op
, 0)) == mode
)
1110 return XEXP (op
, 0);
1112 /* (float_truncate:SF (float_truncate:DF foo:XF))
1113 = (float_truncate:SF foo:XF).
1114 This may eliminate double rounding, so it is unsafe.
1116 (float_truncate:SF (float_extend:XF foo:DF))
1117 = (float_truncate:SF foo:DF).
1119 (float_truncate:DF (float_extend:XF foo:SF))
1120 = (float_extend:SF foo:DF). */
1121 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1122 && flag_unsafe_math_optimizations
)
1123 || GET_CODE (op
) == FLOAT_EXTEND
)
1124 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1126 > GET_MODE_SIZE (mode
)
1127 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1129 XEXP (op
, 0), mode
);
1131 /* (float_truncate (float x)) is (float x) */
1132 if (GET_CODE (op
) == FLOAT
1133 && (flag_unsafe_math_optimizations
1134 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1135 && ((unsigned)significand_size (GET_MODE (op
))
1136 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1137 - num_sign_bit_copies (XEXP (op
, 0),
1138 GET_MODE (XEXP (op
, 0))))))))
1139 return simplify_gen_unary (FLOAT
, mode
,
1141 GET_MODE (XEXP (op
, 0)));
1143 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1144 (OP:SF foo:SF) if OP is NEG or ABS. */
1145 if ((GET_CODE (op
) == ABS
1146 || GET_CODE (op
) == NEG
)
1147 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1148 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1149 return simplify_gen_unary (GET_CODE (op
), mode
,
1150 XEXP (XEXP (op
, 0), 0), mode
);
1152 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1153 is (float_truncate:SF x). */
1154 if (GET_CODE (op
) == SUBREG
1155 && subreg_lowpart_p (op
)
1156 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1157 return SUBREG_REG (op
);
1161 if (DECIMAL_FLOAT_MODE_P (mode
))
1164 /* (float_extend (float_extend x)) is (float_extend x)
1166 (float_extend (float x)) is (float x) assuming that double
1167 rounding can't happen.
1169 if (GET_CODE (op
) == FLOAT_EXTEND
1170 || (GET_CODE (op
) == FLOAT
1171 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1172 && ((unsigned)significand_size (GET_MODE (op
))
1173 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1174 - num_sign_bit_copies (XEXP (op
, 0),
1175 GET_MODE (XEXP (op
, 0)))))))
1176 return simplify_gen_unary (GET_CODE (op
), mode
,
1178 GET_MODE (XEXP (op
, 0)));
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
          || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
        /* We should never get a negative number.  */
        gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
          break;

        case CLRSB:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            val = GET_MODE_PRECISION (mode) - 1;
          else if (arg0 >= 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_PRECISION (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & GET_MODE_MASK (op_mode);
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          op_width = GET_MODE_PRECISION (op_mode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (op_width < HOST_BITS_PER_WIDE_INT)
            {
              val = arg0 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, val))
                val |= ~GET_MODE_MASK (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
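
  /* Editorial note: a standalone sketch (not part of GCC) of the BSWAP
     constant-folding loop above, specialized to a 32-bit value: each
     source byte at bit offset S lands at the mirrored offset
     WIDTH - S - 8.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
sketch_bswap32 (uint32_t arg0)
{
  uint32_t val = 0;
  unsigned int s, width = 32;

  for (s = 0; s < width; s += 8)
    {
      unsigned int d = width - s - 8;
      uint32_t byte = (arg0 >> s) & 0xff;
      val |= byte << d;
    }
  return val;
}

static void
sketch_bswap32_tests (void)
{
  assert (sketch_bswap32 (0x12345678) == 0x78563412);
}
#endif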
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
        first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
                                       CONST_DOUBLE_LOW (op));
      else
        first = double_int::from_shwi (INTVAL (op));

      switch (code)
        {
        case NOT:
          value = ~first;
          break;

        case NEG:
          value = -first;
          break;

        case ABS:
          if (first.is_negative ())
            value = -first;
          else
            value = first;
          break;

        case FFS:
          value.high = 0;
          if (first.low != 0)
            value.low = ffs_hwi (first.low);
          else if (first.high != 0)
            value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
          else
            value.low = 0;
          break;

        case CLZ:
          value.high = 0;
          if (first.high != 0)
            value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
                      - HOST_BITS_PER_WIDE_INT;
          else if (first.low != 0)
            value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
            value.low = GET_MODE_PRECISION (mode);
          break;

        case CTZ:
          value.high = 0;
          if (first.low != 0)
            value.low = ctz_hwi (first.low);
          else if (first.high != 0)
            value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
            value.low = GET_MODE_PRECISION (mode);
          break;

        case POPCOUNT:
          value = double_int_zero;
          while (first.low)
            {
              value.low++;
              first.low &= first.low - 1;
            }
          while (first.high)
            {
              value.low++;
              first.high &= first.high - 1;
            }
          break;

        case PARITY:
          value = double_int_zero;
          while (first.low)
            {
              value.low++;
              first.low &= first.low - 1;
            }
          while (first.high)
            {
              value.low++;
              first.high &= first.high - 1;
            }
          value.low &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            value = double_int_zero;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (first.low >> s) & 0xff;
                else
                  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  value.low |= byte << d;
                else
                  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          value = first;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (op_width > HOST_BITS_PER_WIDE_INT)
            return 0;

          value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || op_width > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              value.low = first.low & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, value.low))
                value.low |= ~GET_MODE_MASK (op_mode);

              value.high = HWI_SIGN_EXTEND (value.low);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_int_const (value, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == HOST_BITS_PER_DOUBLE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
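
/* Editorial note: a concrete (hypothetical) instance of the first rule
   above: (and:SI (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))), since AND operates
   bytewise and therefore commutes with the byte swap; the new constant
   is the byte-swapped original.  */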
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
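
/* For example, (plus (plus a b) (plus c d)) is relinearized as
   (plus (plus (plus a b) c) d), and (plus (plus x (const_int 4)) y)
   is canonicalized as (plus (plus x y) (const_int 4)).  */
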
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));
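
      /* For example, (plus (symbol_ref s) (const_int 4)) folds via
         plus_constant to the relocatable expression
         (const (plus (symbol_ref s) (const_int 4))), while a
         CONST_INT + CONST_INT pair is deliberately left to
         simplify_const_binary_operation.  */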

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = coeff0 + coeff1;
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
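
      /* For example, (plus (mult x (const_int 3)) x) becomes
         (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x)
         becomes (mult x (const_int 5)), provided the new form is no
         more expensive than the original.  */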

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = coeff0 + negcoeff1;
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (CONST_DOUBLE_AS_INT_P (trueop1)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
          && (val < HOST_BITS_PER_DOUBLE_INT - 1
              || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
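
      /* For example, (mult x (const_int 8)) becomes
         (ashift x (const_int 3)), and in DFmode
         (mult x (const_double 2.0)) becomes (plus x x).  */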

    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode)
          && trueop1 == CONSTM1_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;

      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
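
      /* For example, in SImode (ior (ashift x (const_int 8))
         (lshiftrt x (const_int 24))) matches the test above and is
         rewritten as (rotate x (const_int 8)).  */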

      /* Same, but for ashift that has been "simplified" to a wider mode
        by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (UINTVAL (XEXP (op0, 1))
                                                    & ~UINTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
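
      /* For example, in QImode (xor x (const_int -128)) becomes
         (plus x (const_int -128)): adding the sign bit toggles it, and
         any carry out of the mode is discarded, so the two forms are
         equivalent.  */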

      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == PLUS
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          rtx na_c
            = simplify_binary_operation (AND, mode,
                                         simplify_gen_unary (NOT, mode, a, mode),
                                         c);
          if ((~cval & bval) == 0)
            {
              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            GEN_INT (~bval & cval));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (na_c == const0_rtx)
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    GEN_INT (~cval & bval));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              GEN_INT (~bval & cval));
                }
            }
        }

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          enum machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0), op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
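
      /* For example, with M == 0x0f here, ((a | 0x30) + b) & 0x0f becomes
         (a + b) & 0x0f: the IOR with 0x30 cannot influence the low four
         bits of the sum.  */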

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
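
      /* For example, (udiv x (const_int 8)) becomes
         (lshiftrt x (const_int 3)).  */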

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
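
      /* For example, (umod x (const_int 8)) becomes
         (and x (const_int 7)).  */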

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
                       GET_MODE_BITSIZE (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
                                                        - INTVAL (trueop1)));
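
      /* For example, in SImode (rotate x (const_int 24)) becomes
         (rotatert x (const_int 8)), keeping rotate counts in the low
         half of the range.  */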
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
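
      /* For example, with a 32-bit IMODE where CLZ_DEFINED_VALUE_AT_ZERO
         yields 32, (lshiftrt (clz x) (const_int 5)) is 1 exactly when
         x is zero, so it becomes (eq x (const_int 0)).  */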
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract scalar element from a vector using chain of
             nested VEC_SELECT expressions.  When input operand is a memory
             operand, this operation can be simplified to a simple scalar
             load from an offseted memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Recognize the identity.  */
          if (GET_MODE (trueop0) == mode)
            {
              bool maybe_ident = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (j) || INTVAL (j) != i)
                    {
                      maybe_ident = false;
                      break;
                    }
                }
              if (maybe_ident)
                return trueop0;
            }

          /* If we build {a,b} then permute it, build the result directly.  */
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      /* If we select elements in a vec_merge that all come from the same
         operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
        {
          rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
          if (CONST_INT_P (trueop02))
            {
              unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
              bool all_operand0 = true;
              bool all_operand1 = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (sel & (1 << UINTVAL (j)))
                    all_operand1 = false;
                  else
                    all_operand0 = false;
                }
              if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 0), op1);
              if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 1), op1);
            }
        }

      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }

        /* Try to merge two VEC_SELECTs from the same vector into a single one.
           Restrict the transformation to avoid generating a VEC_SELECT with a
           mode unrelated to its operand.  */
        if (GET_CODE (trueop0) == VEC_SELECT
            && GET_CODE (trueop1) == VEC_SELECT
            && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
            && GET_MODE (XEXP (trueop0, 0)) == mode)
          {
            rtx par0 = XEXP (trueop0, 1);
            rtx par1 = XEXP (trueop1, 1);
            int len0 = XVECLEN (par0, 0);
            int len1 = XVECLEN (par1, 0);
            rtvec vec = rtvec_alloc (len0 + len1);
            for (int i = 0; i < len0; i++)
              RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
            for (int i = 0; i < len1; i++)
              RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
            return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
                                        gen_rtx_PARALLEL (VOIDmode, vec));
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
          || GET_CODE (op0) == CONST_FIXED
          || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
          || CONST_DOUBLE_AS_FLOAT_P (op1)
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;
      bool overflow;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          o1 = -o1;

          /* Fall through....  */

        case PLUS:
          res = o0 + o1;
          break;

        case MULT:
          res = o0 * o1;
          break;

        case DIV:
          res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
                                         &tmp, &overflow);
          if (overflow)
            return 0;
          break;

        case MOD:
          tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
                                         &res, &overflow);
          if (overflow)
            return 0;
          break;

        case UDIV:
          res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
                                         &tmp, &overflow);
          if (overflow)
            return 0;
          break;

        case UMOD:
          tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
                                         &res, &overflow);
          if (overflow)
            return 0;
          break;

        case AND:
          res = o0 & o1;
          break;

        case IOR:
          res = o0 | o1;
          break;

        case XOR:
          res = o0 ^ o1;
          break;

        case SMIN:
          res = o0.smin (o1);
          break;

        case SMAX:
          res = o0.smax (o1);
          break;

        case UMIN:
          res = o0.umin (o1);
          break;

        case UMAX:
          res = o0.umax (o1);
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          {
            unsigned HOST_WIDE_INT cnt;

            if (SHIFT_COUNT_TRUNCATED)
              {
                o1.high = 0;
                o1.low &= GET_MODE_PRECISION (mode) - 1;
              }

            if (!o1.fits_uhwi ()
                || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
              return 0;

            cnt = o1.to_uhwi ();
            unsigned short prec = GET_MODE_PRECISION (mode);

            if (code == LSHIFTRT || code == ASHIFTRT)
              res = o0.rshift (cnt, prec, code == ASHIFTRT);
            else if (code == ASHIFT)
              res = o0.alshift (cnt, prec);
            else if (code == ROTATE)
              res = o0.lrotate (cnt, prec);
            else /* code == ROTATERT */
              res = o0.rrotate (cnt, prec);
          }
          break;

        default:
          return 0;
        }

      return immed_double_int_const (res, mode);
    }

  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= GET_MODE_MASK (mode);
          arg1 &= GET_MODE_MASK (mode);

          arg0s = arg0;
          if (val_signbit_known_set_p (mode, arg0s))
            arg0s |= ~GET_MODE_MASK (mode);

          arg1s = arg1;
          if (val_signbit_known_set_p (mode, arg1s))
            arg1s |= ~GET_MODE_MASK (mode);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }

      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;

        case DIV:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
          break;

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
        case SS_MULT:
        case US_MULT:
        case SS_DIV:
        case US_DIV:
        case SS_ASHIFT:
        case US_ASHIFT:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}

4269 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
4272 struct simplify_plus_minus_op_data ops
[8];
4274 int n_ops
= 2, input_ops
= 2;
4275 int changed
, n_constants
= 0, canonicalized
= 0;
4278 memset (ops
, 0, sizeof ops
);
4280 /* Set up the two operands and then expand them until nothing has been
4281 changed. If we run out of room in our array, give up; this should
4282 almost never happen. */
4287 ops
[1].neg
= (code
== MINUS
);
  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              canonicalized |= this_neg;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = CONSTM1_RTX (mode);
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);
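
/* E.g. starting from (minus a (plus b c)), the loop above leaves the
   operand array holding a, -b, -c: the subtrahend (plus b c) is split
   into its two addends with the negation flag propagated to each.  */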
  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
          ops[j + 1] = save;
        }
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode,
                                                     tem_lhs, tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }
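
      /* For instance, combining the entries (const (plus (symbol_ref s)
         (const_int 4))) and (const_int 3) strips the CONST wrapper,
         folds the inner (plus s 4) with 3 to (plus s 7), and re-wraps
         the result as (const (plus s 7)) in a single array slot.  */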
      /* If nothing changed, fail.  */
      if (!canonicalized)
        return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
                                         INTVAL (value));
      n_ops--;
    }
  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));
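
  /* The first transformation above rewrites the common unsigned
     overflow check: e.g. (ltu:SI (plus:SI a (const_int 9)) (const_int 9)),
     which tests whether a + 9 wrapped around, becomes
     (geu:SI a (const_int -9)).  */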
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
         simplification case between:
            A + B == C  <--->  C - B == A,
         where A, B, and C are all constants with non-simplifiable
         expressions, usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
          && CONSTANT_P (x)
          && rtx_equal_p (c, XEXP (tem, 1)))
        return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
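
  /* E.g. (eq (xor x (const_int 3)) (const_int 5)) folds to
     (eq x (const_int 6)), since 3 ^ 5 = 6.  */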
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_unary (BSWAP, cmp_mode,
                                                        op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
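
/* For example, comparing -1 with 1 yields KNOWN_RESULTS = CMP_LT | CMP_GTU
   (signed less-than, unsigned greater-than), so (lt -1 1) maps to
   const_true_rtx while (ltu -1 1) maps to const0_rtx.  */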
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If no simplification is possible,
   this function returns zero.  Otherwise, it returns either
   const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;
4983 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4986 /* For modes without NaNs, if the two operands are equal, we know the
4987 result except if they have side-effects. Even with NaNs we know
4988 the result of unordered comparisons and, if signaling NaNs are
4989 irrelevant, also the result of LT/GT/LTGT. */
4990 if ((! HONOR_NANS (GET_MODE (trueop0
))
4991 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4992 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4993 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4994 && rtx_equal_p (trueop0
, trueop1
)
4995 && ! side_effects_p (trueop0
))
4996 return comparison_result (code
, CMP_EQ
);
4998 /* If the operands are floating-point constants, see if we can fold
5000 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
5001 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
5002 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
5004 REAL_VALUE_TYPE d0
, d1
;
5006 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
5007 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
5009 /* Comparisons are unordered iff at least one of the values is NaN. */
5010 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
5020 return const_true_rtx
;
5033 return comparison_result (code
,
5034 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
5035 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (CONST_DOUBLE_AS_INT_P (trueop1))
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= GET_MODE_MASK (mode);
          l1u &= GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l0s))
            l0s |= ~GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l1s))
            l1s |= ~GET_MODE_MASK (mode);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
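
  /* E.g. comparing (const_int -1) with (const_int 1) in SImode gives
     cr = CMP_LT | CMP_GTU: signed, -1 < 1, but as unsigned values
     0xffffffff > 1.  comparison_result then resolves the requested
     comparison code against those facts.  */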
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
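
  /* For instance, if nonzero_bits shows that x fits in four bits
     (nonzero = 0xf), then mmin = 0 and mmax = 15, and
     (gtu x (const_int 15)) folds to const0_rtx: an unsigned value with
     only the low four bits set can never exceed 15.  */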
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      break;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;
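
      /* E.g. (zero_extract (const_int 0x5a) (const_int 4) (const_int 1))
         with BITS_BIG_ENDIAN clear shifts 0x5a right by one and masks
         with 0xf, giving (const_int 13); SIGN_EXTRACT of the same field
         sees bit 3 set and sign-extends to (const_int -3).  */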
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
          unsigned HOST_WIDE_INT mask;
          if (n_elts == HOST_BITS_PER_WIDE_INT)
            mask = -1;
          else
            mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

          if (!(sel & mask) && !side_effects_p (op0))
            return op1;
          if ((sel & mask) == mask && !side_effects_p (op1))
            return op0;

          rtx trueop0 = avoid_constant_pool_reference (op0);
          rtx trueop1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (trueop0) == CONST_VECTOR
              && GET_CODE (trueop1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
                                    ? CONST_VECTOR_ELT (trueop0, i)
                                    : CONST_VECTOR_ELT (trueop1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
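
          /* The selector's bit I chooses the source of element I:
             e.g. in V4SImode, sel = 5 (binary 0101) takes elements 0
             and 2 from op0 and elements 1 and 3 from op1.  */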
          /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
             if no element from a appears in the result.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
                  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 1), op1, op2);
                  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 0), op1, op2);
                }
            }
          if (GET_CODE (op1) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op1, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
                  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 1), op2);
                  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 0), op2);
                }
            }
        }

      if (rtx_equal_p (op0, op1)
          && !side_effects_p (op2) && !side_effects_p (op1))
        return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;
        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;
        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }
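
  /* E.g. extracting the low SImode word of a DImode value at byte 4 on a
     fully big-endian target: ibyte = (8 - 4) - 4 = 0, so BYTE is
     renumbered to 0, the position of the least-significant bytes in the
     little-endian value[] array.  */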
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;
        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
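
  /* E.g. with INNERMODE SImode on a little-endian target,
     (subreg:QI (const_int 0x1234) 0) is folded by simplify_immed_subreg
     to (const_int 0x34), the low-order byte of the constant.  */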
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower
             part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD)
                                * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that can not
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}