/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
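
/* For illustration (this example is not part of the original file): if a
   (low, high) pair is meant to hold the sign extension of LOW, the high
   word can be obtained as

     high = HWI_SIGN_EXTEND (low);

   which yields all-ones when LOW is negative as a signed HOST_WIDE_INT
   and zero otherwise.  */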
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
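
/* Illustrative example (not part of the original file): for SImode on a
   host with 64-bit HOST_WIDE_INT, mode_signbit_p holds only for the
   constant produced by gen_int_mode ((unsigned HOST_WIDE_INT) 1 << 31,
   SImode), i.e. (const_int -2147483648).  */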
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
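
/* Illustrative usage (not part of the original file):

     simplify_gen_binary (PLUS, SImode, gen_rtx_REG (SImode, 100),
                          const0_rtx)

   folds to (reg:SI 100) because x + 0 simplifies for integer modes, while
   non-foldable operands simply yield the canonically ordered PLUS rtx.  */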
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
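
/* Illustrative usage (not part of the original file): propagating a known
   constant into an expression,

     simplify_replace_rtx (gen_rtx_PLUS (SImode, reg, const1_rtx),
                           reg, GEN_INT (7))

   substitutes (const_int 7) for REG and lets the PLUS fold to
   (const_int 8).  */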
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (enum machine_mode mode, rtx op,
                     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
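
/* Illustrative example (not part of the original file): once a caller has
   proven that the SImode truncation is representable,
   (truncate:SI (plus:DI (reg:DI 100) (reg:DI 101))) is rewritten by the
   PLUS/MINUS/MULT case above into
   (plus:SI (truncate:SI (reg:DI 100)) (truncate:SI (reg:DI 101))),
   so that each narrower operand can simplify further.  */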
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
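
/* Illustrative usage (not part of the original file):
   simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode) folds to
   (const_int -5) via simplify_const_unary_operation, whereas a
   non-constant operand falls through to simplify_unary_operation_1.  */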
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
         */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
          || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
        /* We should never get a negative number.  */
        gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
          break;

        case CLRSB:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            val = GET_MODE_PRECISION (mode) - 1;
          else if (arg0 >= 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
          else if (arg0 < 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_PRECISION (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & GET_MODE_MASK (op_mode);
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          op_width = GET_MODE_PRECISION (op_mode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (op_width < HOST_BITS_PER_WIDE_INT)
            {
              val = arg0 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, val))
                val |= ~GET_MODE_MASK (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
        case SS_ABS:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
        first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
                                       CONST_DOUBLE_LOW (op));
      else
        first = double_int::from_shwi (INTVAL (op));

      switch (code)
        {
        case NOT:
          value = ~first;
          break;

        case NEG:
          value = -first;
          break;

        case ABS:
          if (first.is_negative ())
            value = -first;
          else
            value = first;
          break;

        case FFS:
          value.high = 0;
          if (first.low != 0)
            value.low = ffs_hwi (first.low);
          else if (first.high != 0)
            value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
          else
            value.low = 0;
          break;

        case CLZ:
          value.high = 0;
          if (first.high != 0)
            value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (first.low != 0)
            value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
            value.low = GET_MODE_PRECISION (mode);
          break;

        case CTZ:
          value.high = 0;
          if (first.low != 0)
            value.low = ctz_hwi (first.low);
          else if (first.high != 0)
            value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
            value.low = GET_MODE_PRECISION (mode);
          break;

        case POPCOUNT:
          value = double_int_zero;
          while (first.low)
            {
              value.low++;
              first.low &= first.low - 1;
            }
          while (first.high)
            {
              value.low++;
              first.high &= first.high - 1;
            }
          break;

        case PARITY:
          value = double_int_zero;
          while (first.low)
            {
              value.low++;
              first.low &= first.low - 1;
            }
          while (first.high)
            {
              value.low++;
              first.high &= first.high - 1;
            }
          value.low &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            value = double_int_zero;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (first.low >> s) & 0xff;
                else
                  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  value.low |= byte << d;
                else
                  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          value = first;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (op_width > HOST_BITS_PER_WIDE_INT)
            return 0;

          value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || op_width > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              value.low = first.low & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, value.low))
                value.low |= ~GET_MODE_MASK (op_mode);

              value.high = HWI_SIGN_EXTEND (value.low);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_int_const (value, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == HOST_BITS_PER_DOUBLE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP
      && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
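
/* Illustrative example (not part of the original file): in SImode,
   (and (bswap:SI x) (const_int 0xff)) is canonicalized by the first case
   above into (bswap:SI (and:SI x (const_int 0xff000000))): the constant
   is byte-swapped and the BSWAP is pulled outward.  */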
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
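/* A minimal usage sketch (hypothetical caller, not part of this file):

       rtx folded = simplify_binary_operation (PLUS, SImode, x, const1_rtx);
       if (folded)
         ... use FOLDED in place of emitting (plus:SI x (const_int 1)) ...

   A null return simply means no simplification was found; it is not an
   error.  */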
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;
2204 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2205 transformations are safe even for IEEE. */
2206 if (GET_CODE (op0
) == NEG
)
2207 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2208 else if (GET_CODE (op1
) == NEG
)
2209 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2211 /* (~a) + 1 -> -a */
2212 if (INTEGRAL_MODE_P (mode
)
2213 && GET_CODE (op0
) == NOT
2214 && trueop1
== const1_rtx
)
2215 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */
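      /* For instance, (plus (symbol_ref "foo") (const_int 4)) is folded by
         plus_constant into (const (plus (symbol_ref "foo") (const_int 4))),
         a single relocatable constant, rather than being left as a run-time
         addition.  The symbol name here is purely illustrative.  */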
2223 if ((GET_CODE (op0
) == CONST
2224 || GET_CODE (op0
) == SYMBOL_REF
2225 || GET_CODE (op0
) == LABEL_REF
)
2226 && CONST_INT_P (op1
))
2227 return plus_constant (mode
, op0
, INTVAL (op1
));
2228 else if ((GET_CODE (op1
) == CONST
2229 || GET_CODE (op1
) == SYMBOL_REF
2230 || GET_CODE (op1
) == LABEL_REF
)
2231 && CONST_INT_P (op0
))
2232 return plus_constant (mode
, op1
, INTVAL (op0
));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */
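      /* As a concrete instance of the comment above, assuming SImode:
         (plus (mult x (const_int 3)) x) collects the coefficients 3 and 1
         on the common operand x, so it can become (mult x (const_int 4)),
         provided the new form is no more expensive than the original.  */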
2240 if (SCALAR_INT_MODE_P (mode
))
2242 double_int coeff0
, coeff1
;
2243 rtx lhs
= op0
, rhs
= op1
;
2245 coeff0
= double_int_one
;
2246 coeff1
= double_int_one
;
2248 if (GET_CODE (lhs
) == NEG
)
2250 coeff0
= double_int_minus_one
;
2251 lhs
= XEXP (lhs
, 0);
2253 else if (GET_CODE (lhs
) == MULT
2254 && CONST_INT_P (XEXP (lhs
, 1)))
2256 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2257 lhs
= XEXP (lhs
, 0);
2259 else if (GET_CODE (lhs
) == ASHIFT
2260 && CONST_INT_P (XEXP (lhs
, 1))
2261 && INTVAL (XEXP (lhs
, 1)) >= 0
2262 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2264 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2265 lhs
= XEXP (lhs
, 0);
2268 if (GET_CODE (rhs
) == NEG
)
2270 coeff1
= double_int_minus_one
;
2271 rhs
= XEXP (rhs
, 0);
2273 else if (GET_CODE (rhs
) == MULT
2274 && CONST_INT_P (XEXP (rhs
, 1)))
2276 coeff1
= double_int::from_shwi (INTVAL (XEXP (rhs
, 1)));
2277 rhs
= XEXP (rhs
, 0);
2279 else if (GET_CODE (rhs
) == ASHIFT
2280 && CONST_INT_P (XEXP (rhs
, 1))
2281 && INTVAL (XEXP (rhs
, 1)) >= 0
2282 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2284 coeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2285 rhs
= XEXP (rhs
, 0);
2288 if (rtx_equal_p (lhs
, rhs
))
2290 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2293 bool speed
= optimize_function_for_speed_p (cfun
);
2295 val
= coeff0
+ coeff1
;
2296 coeff
= immed_double_int_const (val
, mode
);
2298 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2299 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2304 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2305 if (CONST_SCALAR_INT_P (op1
)
2306 && GET_CODE (op0
) == XOR
2307 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2308 && mode_signbit_p (mode
, op1
))
2309 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2310 simplify_gen_binary (XOR
, mode
, op1
,
2313 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2314 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2315 && GET_CODE (op0
) == MULT
2316 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2320 in1
= XEXP (XEXP (op0
, 0), 0);
2321 in2
= XEXP (op0
, 1);
2322 return simplify_gen_binary (MINUS
, mode
, op1
,
2323 simplify_gen_binary (MULT
, mode
,
      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
2330 if (COMPARISON_P (op0
)
2331 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2332 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2333 && (reversed
= reversed_comparison (op0
, mode
)))
2335 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */
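      /* A small example of why this is restricted for floating point:
         in SFmode, (1.0e8 + 1.0) - 1.0e8 evaluates to 0.0 because the
         intermediate sum rounds away the 1.0, while 1.0e8 - 1.0e8 + 1.0
         is 1.0, so reassociation would change the observable result.  */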
2343 if (INTEGRAL_MODE_P (mode
)
2344 && (plus_minus_operand_p (op0
)
2345 || plus_minus_operand_p (op1
))
2346 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2349 /* Reassociate floating point addition only when the user
2350 specifies associative math operations. */
2351 if (FLOAT_MODE_P (mode
)
2352 && flag_associative_math
)
2354 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2361 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2362 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2363 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2364 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2366 rtx xop00
= XEXP (op0
, 0);
2367 rtx xop10
= XEXP (op1
, 0);
2370 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2372 if (REG_P (xop00
) && REG_P (xop10
)
2373 && GET_MODE (xop00
) == GET_MODE (xop10
)
2374 && REGNO (xop00
) == REGNO (xop10
)
2375 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2376 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
2386 if (rtx_equal_p (trueop0
, trueop1
)
2387 && ! side_effects_p (op0
)
2388 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2389 return CONST0_RTX (mode
);
      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
2395 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2396 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2398 /* (-1 - a) is ~a. */
2399 if (trueop0
== constm1_rtx
)
2400 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
2405 if (!(HONOR_SIGNED_ZEROS (mode
)
2406 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2407 && trueop1
== CONST0_RTX (mode
))
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */
2416 if (SCALAR_INT_MODE_P (mode
))
2418 double_int coeff0
, negcoeff1
;
2419 rtx lhs
= op0
, rhs
= op1
;
2421 coeff0
= double_int_one
;
2422 negcoeff1
= double_int_minus_one
;
2424 if (GET_CODE (lhs
) == NEG
)
2426 coeff0
= double_int_minus_one
;
2427 lhs
= XEXP (lhs
, 0);
2429 else if (GET_CODE (lhs
) == MULT
2430 && CONST_INT_P (XEXP (lhs
, 1)))
2432 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2433 lhs
= XEXP (lhs
, 0);
2435 else if (GET_CODE (lhs
) == ASHIFT
2436 && CONST_INT_P (XEXP (lhs
, 1))
2437 && INTVAL (XEXP (lhs
, 1)) >= 0
2438 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2440 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2441 lhs
= XEXP (lhs
, 0);
2444 if (GET_CODE (rhs
) == NEG
)
2446 negcoeff1
= double_int_one
;
2447 rhs
= XEXP (rhs
, 0);
2449 else if (GET_CODE (rhs
) == MULT
2450 && CONST_INT_P (XEXP (rhs
, 1)))
2452 negcoeff1
= double_int::from_shwi (-INTVAL (XEXP (rhs
, 1)));
2453 rhs
= XEXP (rhs
, 0);
2455 else if (GET_CODE (rhs
) == ASHIFT
2456 && CONST_INT_P (XEXP (rhs
, 1))
2457 && INTVAL (XEXP (rhs
, 1)) >= 0
2458 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2460 negcoeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2461 negcoeff1
= -negcoeff1
;
2462 rhs
= XEXP (rhs
, 0);
2465 if (rtx_equal_p (lhs
, rhs
))
2467 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2470 bool speed
= optimize_function_for_speed_p (cfun
);
2472 val
= coeff0
+ negcoeff1
;
2473 coeff
= immed_double_int_const (val
, mode
);
2475 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2476 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2481 /* (a - (-b)) -> (a + b). True even for IEEE. */
2482 if (GET_CODE (op1
) == NEG
)
2483 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2485 /* (-x - c) may be simplified as (-c - x). */
2486 if (GET_CODE (op0
) == NEG
2487 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2489 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2491 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2494 /* Don't let a relocatable value get a negative coeff. */
2495 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2496 return simplify_gen_binary (PLUS
, mode
,
2498 neg_const_int (mode
, op1
));
2500 /* (x - (x & y)) -> (x & ~y) */
2501 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2503 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2505 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2506 GET_MODE (XEXP (op1
, 1)));
2507 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2509 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2511 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2512 GET_MODE (XEXP (op1
, 0)));
2513 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2517 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2518 by reversing the comparison code if valid. */
2519 if (STORE_FLAG_VALUE
== 1
2520 && trueop0
== const1_rtx
2521 && COMPARISON_P (op1
)
2522 && (reversed
= reversed_comparison (op1
, mode
)))
2525 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2526 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2527 && GET_CODE (op1
) == MULT
2528 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2532 in1
= XEXP (XEXP (op1
, 0), 0);
2533 in2
= XEXP (op1
, 1);
2534 return simplify_gen_binary (PLUS
, mode
,
2535 simplify_gen_binary (MULT
, mode
,
2540 /* Canonicalize (minus (neg A) (mult B C)) to
2541 (minus (mult (neg B) C) A). */
2542 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2543 && GET_CODE (op1
) == MULT
2544 && GET_CODE (op0
) == NEG
)
2548 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2549 in2
= XEXP (op1
, 1);
2550 return simplify_gen_binary (MINUS
, mode
,
2551 simplify_gen_binary (MULT
, mode
,
2556 /* If one of the operands is a PLUS or a MINUS, see if we can
2557 simplify this by the associative law. This will, for example,
2558 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2559 Don't use the associative law for floating point.
2560 The inaccuracy makes it nonassociative,
2561 and subtle programs can break if operations are associated. */
2563 if (INTEGRAL_MODE_P (mode
)
2564 && (plus_minus_operand_p (op0
)
2565 || plus_minus_operand_p (op1
))
2566 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2571 if (trueop1
== constm1_rtx
)
2572 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2574 if (GET_CODE (op0
) == NEG
)
2576 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could, through simplify_associative_operation, move
             the NEG around again and recurse endlessly.  */
2582 && GET_CODE (op1
) == MULT
2583 && GET_CODE (temp
) == MULT
2584 && XEXP (op1
, 0) == XEXP (temp
, 0)
2585 && GET_CODE (XEXP (temp
, 1)) == NEG
2586 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2589 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2591 if (GET_CODE (op1
) == NEG
)
2593 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could, through simplify_associative_operation, move
             the NEG around again and recurse endlessly.  */
2599 && GET_CODE (op0
) == MULT
2600 && GET_CODE (temp
) == MULT
2601 && XEXP (op0
, 0) == XEXP (temp
, 0)
2602 && GET_CODE (XEXP (temp
, 1)) == NEG
2603 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2606 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
2613 if (!HONOR_NANS (mode
)
2614 && !HONOR_SIGNED_ZEROS (mode
)
2615 && trueop1
== CONST0_RTX (mode
)
2616 && ! side_effects_p (op0
))
      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
2621 if (!HONOR_SNANS (mode
)
2622 && trueop1
== CONST1_RTX (mode
))
      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
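      /* For example, (mult x (const_int 8)) becomes
         (ashift x (const_int 3)) here, since 8 is 1 << 3.  */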
2627 if (CONST_INT_P (trueop1
)
2628 && (val
= exact_log2 (UINTVAL (trueop1
))) >= 0
2629 /* If the mode is larger than the host word size, and the
2630 uppermost bit is set, then this isn't a power of two due
2631 to implicit sign extension. */
2632 && (width
<= HOST_BITS_PER_WIDE_INT
2633 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
2634 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2636 /* Likewise for multipliers wider than a word. */
2637 if (CONST_DOUBLE_AS_INT_P (trueop1
)
2638 && GET_MODE (op0
) == mode
2639 && CONST_DOUBLE_LOW (trueop1
) == 0
2640 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0
2641 && (val
< HOST_BITS_PER_DOUBLE_INT
- 1
2642 || GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_DOUBLE_INT
))
2643 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2644 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2646 /* x*2 is x+x and x*(-1) is -x */
2647 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2648 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2649 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2650 && GET_MODE (op0
) == mode
)
2653 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2655 if (REAL_VALUES_EQUAL (d
, dconst2
))
2656 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2658 if (!HONOR_SNANS (mode
)
2659 && REAL_VALUES_EQUAL (d
, dconstm1
))
2660 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2663 /* Optimize -x * -x as x * x. */
2664 if (FLOAT_MODE_P (mode
)
2665 && GET_CODE (op0
) == NEG
2666 && GET_CODE (op1
) == NEG
2667 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2668 && !side_effects_p (XEXP (op0
, 0)))
2669 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2671 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2672 if (SCALAR_FLOAT_MODE_P (mode
)
2673 && GET_CODE (op0
) == ABS
2674 && GET_CODE (op1
) == ABS
2675 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2676 && !side_effects_p (XEXP (op0
, 0)))
2677 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2679 /* Reassociate multiplication, but for floating point MULTs
2680 only when the user specifies unsafe math optimizations. */
2681 if (! FLOAT_MODE_P (mode
)
2682 || flag_unsafe_math_optimizations
)
2684 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2691 if (trueop1
== CONST0_RTX (mode
))
2693 if (INTEGRAL_MODE_P (mode
)
2694 && trueop1
== CONSTM1_RTX (mode
)
2695 && !side_effects_p (op0
))
2697 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2699 /* A | (~A) -> -1 */
2700 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2701 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2702 && ! side_effects_p (op0
)
2703 && SCALAR_INT_MODE_P (mode
))
2706 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2707 if (CONST_INT_P (op1
)
2708 && HWI_COMPUTABLE_MODE_P (mode
)
2709 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2710 && !side_effects_p (op0
))
2713 /* Canonicalize (X & C1) | C2. */
2714 if (GET_CODE (op0
) == AND
2715 && CONST_INT_P (trueop1
)
2716 && CONST_INT_P (XEXP (op0
, 1)))
2718 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2719 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2720 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2722 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2724 && !side_effects_p (XEXP (op0
, 0)))
2727 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2728 if (((c1
|c2
) & mask
) == mask
)
2729 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2731 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2732 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2734 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2735 gen_int_mode (c1
& ~c2
, mode
));
2736 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2740 /* Convert (A & B) | A to A. */
2741 if (GET_CODE (op0
) == AND
2742 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2743 || rtx_equal_p (XEXP (op0
, 1), op1
))
2744 && ! side_effects_p (XEXP (op0
, 0))
2745 && ! side_effects_p (XEXP (op0
, 1)))
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */
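      /* For example, in SImode
             (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
         is recognized below as (rotate x (const_int 8)), because the two
         shift counts add up to the 32-bit precision of the mode.  */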
2751 if (GET_CODE (op1
) == ASHIFT
2752 || GET_CODE (op1
) == SUBREG
)
2763 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2764 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2765 && CONST_INT_P (XEXP (opleft
, 1))
2766 && CONST_INT_P (XEXP (opright
, 1))
2767 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2768 == GET_MODE_PRECISION (mode
)))
2769 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2771 /* Same, but for ashift that has been "simplified" to a wider mode
2772 by simplify_shift_const. */
2774 if (GET_CODE (opleft
) == SUBREG
2775 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2776 && GET_CODE (opright
) == LSHIFTRT
2777 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2778 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2779 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2780 && (GET_MODE_SIZE (GET_MODE (opleft
))
2781 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2782 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2783 SUBREG_REG (XEXP (opright
, 0)))
2784 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2785 && CONST_INT_P (XEXP (opright
, 1))
2786 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2787 == GET_MODE_PRECISION (mode
)))
2788 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2789 XEXP (SUBREG_REG (opleft
), 1));
2791 /* If we have (ior (and (X C1) C2)), simplify this by making
2792 C1 as small as possible if C1 actually changes. */
2793 if (CONST_INT_P (op1
)
2794 && (HWI_COMPUTABLE_MODE_P (mode
)
2795 || INTVAL (op1
) > 0)
2796 && GET_CODE (op0
) == AND
2797 && CONST_INT_P (XEXP (op0
, 1))
2798 && CONST_INT_P (op1
)
2799 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2800 return simplify_gen_binary (IOR
, mode
,
2802 (AND
, mode
, XEXP (op0
, 0),
2803 GEN_INT (UINTVAL (XEXP (op0
, 1))
2807 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2808 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2809 the PLUS does not affect any of the bits in OP1: then we can do
2810 the IOR as a PLUS and we can associate. This is valid if OP1
2811 can be safely shifted left C bits. */
2812 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2813 && GET_CODE (XEXP (op0
, 0)) == PLUS
2814 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2815 && CONST_INT_P (XEXP (op0
, 1))
2816 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2818 int count
= INTVAL (XEXP (op0
, 1));
2819 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2821 if (mask
>> count
== INTVAL (trueop1
)
2822 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2823 return simplify_gen_binary (ASHIFTRT
, mode
,
2824 plus_constant (mode
, XEXP (op0
, 0),
2829 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2833 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2839 if (trueop1
== CONST0_RTX (mode
))
2841 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2842 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2843 if (rtx_equal_p (trueop0
, trueop1
)
2844 && ! side_effects_p (op0
)
2845 && GET_MODE_CLASS (mode
) != MODE_CC
)
2846 return CONST0_RTX (mode
);
2848 /* Canonicalize XOR of the most significant bit to PLUS. */
2849 if (CONST_SCALAR_INT_P (op1
)
2850 && mode_signbit_p (mode
, op1
))
2851 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2852 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2853 if (CONST_SCALAR_INT_P (op1
)
2854 && GET_CODE (op0
) == PLUS
2855 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2856 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2857 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2858 simplify_gen_binary (XOR
, mode
, op1
,
2861 /* If we are XORing two things that have no bits in common,
2862 convert them into an IOR. This helps to detect rotation encoded
2863 using those methods and possibly other simplifications. */
2865 if (HWI_COMPUTABLE_MODE_P (mode
)
2866 && (nonzero_bits (op0
, mode
)
2867 & nonzero_bits (op1
, mode
)) == 0)
2868 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
2874 int num_negated
= 0;
2876 if (GET_CODE (op0
) == NOT
)
2877 num_negated
++, op0
= XEXP (op0
, 0);
2878 if (GET_CODE (op1
) == NOT
)
2879 num_negated
++, op1
= XEXP (op1
, 0);
2881 if (num_negated
== 2)
2882 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2883 else if (num_negated
== 1)
2884 return simplify_gen_unary (NOT
, mode
,
2885 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2889 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2890 correspond to a machine insn or result in further simplifications
2891 if B is a constant. */
2893 if (GET_CODE (op0
) == AND
2894 && rtx_equal_p (XEXP (op0
, 1), op1
)
2895 && ! side_effects_p (op1
))
2896 return simplify_gen_binary (AND
, mode
,
2897 simplify_gen_unary (NOT
, mode
,
2898 XEXP (op0
, 0), mode
),
2901 else if (GET_CODE (op0
) == AND
2902 && rtx_equal_p (XEXP (op0
, 0), op1
)
2903 && ! side_effects_p (op1
))
2904 return simplify_gen_binary (AND
, mode
,
2905 simplify_gen_unary (NOT
, mode
,
2906 XEXP (op0
, 1), mode
),
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
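      /* Worked example with concrete constants, for the (~C & B) == 0 case
         handled first below (B's bits are a subset of C's bits):
         with A = 0b0101, B = 0b0011, C = 0b1111,
             (A & B) ^ C = 0b0001 ^ 0b1111 = 0b1110,
         which equals (~A & C) | (~B & C) = 0b1010 | 0b1100 = 0b1110.  */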
2915 if (GET_CODE (op0
) == AND
2916 && CONST_INT_P (op1
)
2917 && CONST_INT_P (XEXP (op0
, 1)))
2919 rtx a
= XEXP (op0
, 0);
2920 rtx b
= XEXP (op0
, 1);
2922 HOST_WIDE_INT bval
= INTVAL (b
);
2923 HOST_WIDE_INT cval
= INTVAL (c
);
2926 = simplify_binary_operation (AND
, mode
,
2927 simplify_gen_unary (NOT
, mode
, a
, mode
),
2929 if ((~cval
& bval
) == 0)
2931 /* Try to simplify ~A&C | ~B&C. */
2932 if (na_c
!= NULL_RTX
)
2933 return simplify_gen_binary (IOR
, mode
, na_c
,
2934 GEN_INT (~bval
& cval
));
2938 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2939 if (na_c
== const0_rtx
)
2941 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2942 GEN_INT (~cval
& bval
));
2943 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2944 GEN_INT (~bval
& cval
));
2949 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2950 comparison if STORE_FLAG_VALUE is 1. */
2951 if (STORE_FLAG_VALUE
== 1
2952 && trueop1
== const1_rtx
2953 && COMPARISON_P (op0
)
2954 && (reversed
= reversed_comparison (op0
, mode
)))
2957 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2958 is (lt foo (const_int 0)), so we can perform the above
2959 simplification if STORE_FLAG_VALUE is 1. */
2961 if (STORE_FLAG_VALUE
== 1
2962 && trueop1
== const1_rtx
2963 && GET_CODE (op0
) == LSHIFTRT
2964 && CONST_INT_P (XEXP (op0
, 1))
2965 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2966 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2968 /* (xor (comparison foo bar) (const_int sign-bit))
2969 when STORE_FLAG_VALUE is the sign bit. */
2970 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2971 && trueop1
== const_true_rtx
2972 && COMPARISON_P (op0
)
2973 && (reversed
= reversed_comparison (op0
, mode
)))
2976 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2980 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2986 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2988 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2990 if (HWI_COMPUTABLE_MODE_P (mode
))
2992 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2993 HOST_WIDE_INT nzop1
;
2994 if (CONST_INT_P (trueop1
))
2996 HOST_WIDE_INT val1
= INTVAL (trueop1
);
            /* If we are turning off bits already known off in OP0, we need
               not do an AND.  */
2999 if ((nzop0
& ~val1
) == 0)
3002 nzop1
= nonzero_bits (trueop1
, mode
);
3003 /* If we are clearing all the nonzero bits, the result is zero. */
3004 if ((nzop1
& nzop0
) == 0
3005 && !side_effects_p (op0
) && !side_effects_p (op1
))
3006 return CONST0_RTX (mode
);
3008 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3009 && GET_MODE_CLASS (mode
) != MODE_CC
)
3012 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3013 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3014 && ! side_effects_p (op0
)
3015 && GET_MODE_CLASS (mode
) != MODE_CC
)
3016 return CONST0_RTX (mode
);
3018 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3019 there are no nonzero bits of C outside of X's mode. */
3020 if ((GET_CODE (op0
) == SIGN_EXTEND
3021 || GET_CODE (op0
) == ZERO_EXTEND
)
3022 && CONST_INT_P (trueop1
)
3023 && HWI_COMPUTABLE_MODE_P (mode
)
3024 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3025 & UINTVAL (trueop1
)) == 0)
3027 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3028 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3029 gen_int_mode (INTVAL (trueop1
),
3031 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3034 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3035 we might be able to further simplify the AND with X and potentially
3036 remove the truncation altogether. */
3037 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3039 rtx x
= XEXP (op0
, 0);
3040 enum machine_mode xmode
= GET_MODE (x
);
3041 tem
= simplify_gen_binary (AND
, xmode
, x
,
3042 gen_int_mode (INTVAL (trueop1
), xmode
));
3043 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3046 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3047 if (GET_CODE (op0
) == IOR
3048 && CONST_INT_P (trueop1
)
3049 && CONST_INT_P (XEXP (op0
, 1)))
3051 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3052 return simplify_gen_binary (IOR
, mode
,
3053 simplify_gen_binary (AND
, mode
,
3054 XEXP (op0
, 0), op1
),
3055 gen_int_mode (tmp
, mode
));
3058 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3059 insn (and may simplify more). */
3060 if (GET_CODE (op0
) == XOR
3061 && rtx_equal_p (XEXP (op0
, 0), op1
)
3062 && ! side_effects_p (op1
))
3063 return simplify_gen_binary (AND
, mode
,
3064 simplify_gen_unary (NOT
, mode
,
3065 XEXP (op0
, 1), mode
),
3068 if (GET_CODE (op0
) == XOR
3069 && rtx_equal_p (XEXP (op0
, 1), op1
)
3070 && ! side_effects_p (op1
))
3071 return simplify_gen_binary (AND
, mode
,
3072 simplify_gen_unary (NOT
, mode
,
3073 XEXP (op0
, 0), mode
),
3076 /* Similarly for (~(A ^ B)) & A. */
3077 if (GET_CODE (op0
) == NOT
3078 && GET_CODE (XEXP (op0
, 0)) == XOR
3079 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3080 && ! side_effects_p (op1
))
3081 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3083 if (GET_CODE (op0
) == NOT
3084 && GET_CODE (XEXP (op0
, 0)) == XOR
3085 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3086 && ! side_effects_p (op1
))
3087 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3089 /* Convert (A | B) & A to A. */
3090 if (GET_CODE (op0
) == IOR
3091 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3092 || rtx_equal_p (XEXP (op0
, 1), op1
))
3093 && ! side_effects_p (XEXP (op0
, 0))
3094 && ! side_effects_p (XEXP (op0
, 1)))
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
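      /* For instance, with M = 0xff (a low-bit mask) and N = 0xff,
             (and (plus (and a (const_int 0xff)) b) (const_int 0xff))
         can drop the inner AND and become
             (and (plus a b) (const_int 0xff)),
         because bits of A above the mask cannot affect the masked sum.  */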
3104 if (CONST_INT_P (trueop1
)
3105 && HWI_COMPUTABLE_MODE_P (mode
)
3106 && ~UINTVAL (trueop1
)
3107 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3108 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3113 pmop
[0] = XEXP (op0
, 0);
3114 pmop
[1] = XEXP (op0
, 1);
3116 if (CONST_INT_P (pmop
[1])
3117 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3118 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3120 for (which
= 0; which
< 2; which
++)
3123 switch (GET_CODE (tem
))
3126 if (CONST_INT_P (XEXP (tem
, 1))
3127 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3128 == UINTVAL (trueop1
))
3129 pmop
[which
] = XEXP (tem
, 0);
3133 if (CONST_INT_P (XEXP (tem
, 1))
3134 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3135 pmop
[which
] = XEXP (tem
, 0);
3142 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3144 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3146 return simplify_gen_binary (code
, mode
, tem
, op1
);
3150 /* (and X (ior (not X) Y) -> (and X Y) */
3151 if (GET_CODE (op1
) == IOR
3152 && GET_CODE (XEXP (op1
, 0)) == NOT
3153 && op0
== XEXP (XEXP (op1
, 0), 0))
3154 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3156 /* (and (ior (not X) Y) X) -> (and X Y) */
3157 if (GET_CODE (op0
) == IOR
3158 && GET_CODE (XEXP (op0
, 0)) == NOT
3159 && op1
== XEXP (XEXP (op0
, 0), 0))
3160 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3162 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3166 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3172 /* 0/x is 0 (or x&0 if x has side-effects). */
3173 if (trueop0
== CONST0_RTX (mode
))
3175 if (side_effects_p (op1
))
3176 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3180 if (trueop1
== CONST1_RTX (mode
))
3182 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3186 /* Convert divide by power of two into shift. */
3187 if (CONST_INT_P (trueop1
)
3188 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3189 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3193 /* Handle floating point and integers separately. */
3194 if (SCALAR_FLOAT_MODE_P (mode
))
3196 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3197 safe for modes with NaNs, since 0.0 / 0.0 will then be
3198 NaN rather than 0.0. Nor is it safe for modes with signed
3199 zeros, since dividing 0 by a negative number gives -0.0 */
3200 if (trueop0
== CONST0_RTX (mode
)
3201 && !HONOR_NANS (mode
)
3202 && !HONOR_SIGNED_ZEROS (mode
)
3203 && ! side_effects_p (op1
))
3206 if (trueop1
== CONST1_RTX (mode
)
3207 && !HONOR_SNANS (mode
))
3210 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3211 && trueop1
!= CONST0_RTX (mode
))
3214 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3217 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3218 && !HONOR_SNANS (mode
))
3219 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
          /* Change FP division by a constant into multiplication.
             Only do this with -freciprocal-math.  */
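          /* For example, with -freciprocal-math, (div x (const_double 4.0))
             becomes (mult x (const_double 0.25)); 1/4.0 happens to be exactly
             representable, but in general the reciprocal may round, which is
             why the transformation is gated on that flag.  */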
3223 if (flag_reciprocal_math
3224 && !REAL_VALUES_EQUAL (d
, dconst0
))
3226 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3227 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3228 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3232 else if (SCALAR_INT_MODE_P (mode
))
3234 /* 0/x is 0 (or x&0 if x has side-effects). */
3235 if (trueop0
== CONST0_RTX (mode
)
3236 && !cfun
->can_throw_non_call_exceptions
)
3238 if (side_effects_p (op1
))
3239 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3243 if (trueop1
== CONST1_RTX (mode
))
3245 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3250 if (trueop1
== constm1_rtx
)
3252 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3254 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3260 /* 0%x is 0 (or x&0 if x has side-effects). */
3261 if (trueop0
== CONST0_RTX (mode
))
3263 if (side_effects_p (op1
))
3264 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
      /* x%1 is 0 (or x&0 if x has side-effects).  */
3268 if (trueop1
== CONST1_RTX (mode
))
3270 if (side_effects_p (op0
))
3271 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3272 return CONST0_RTX (mode
);
3274 /* Implement modulus by power of two as AND. */
3275 if (CONST_INT_P (trueop1
)
3276 && exact_log2 (UINTVAL (trueop1
)) > 0)
3277 return simplify_gen_binary (AND
, mode
, op0
,
3278 GEN_INT (INTVAL (op1
) - 1));
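      /* For example, (umod x (const_int 8)) becomes
         (and x (const_int 7)), since 8 is a power of two and the
         remainder is just the low three bits.  */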
3282 /* 0%x is 0 (or x&0 if x has side-effects). */
3283 if (trueop0
== CONST0_RTX (mode
))
3285 if (side_effects_p (op1
))
3286 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3289 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3290 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3292 if (side_effects_p (op0
))
3293 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3294 return CONST0_RTX (mode
);
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
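      /* For example, in SImode (rotate x (const_int 28)) is canonicalized
         below to (rotatert x (const_int 4)), and (rotatert x (const_int 28))
         to (rotate x (const_int 4)).  */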
3304 if (CONST_INT_P (trueop1
)
3305 && IN_RANGE (INTVAL (trueop1
),
3306 GET_MODE_BITSIZE (mode
) / 2 + (code
== ROTATE
),
3307 GET_MODE_BITSIZE (mode
) - 1))
3308 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3309 mode
, op0
, GEN_INT (GET_MODE_BITSIZE (mode
)
3310 - INTVAL (trueop1
)));
3313 if (trueop1
== CONST0_RTX (mode
))
3315 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3317 /* Rotating ~0 always results in ~0. */
3318 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3319 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3320 && ! side_effects_p (op1
))
3323 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3325 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
3326 if (val
!= INTVAL (op1
))
3327 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3334 if (trueop1
== CONST0_RTX (mode
))
3336 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3338 goto canonicalize_shift
;
3341 if (trueop1
== CONST0_RTX (mode
))
3343 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3345 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3346 if (GET_CODE (op0
) == CLZ
3347 && CONST_INT_P (trueop1
)
3348 && STORE_FLAG_VALUE
== 1
3349 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3351 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3352 unsigned HOST_WIDE_INT zero_val
= 0;
3354 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3355 && zero_val
== GET_MODE_PRECISION (imode
)
3356 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3357 return simplify_gen_relational (EQ
, mode
, imode
,
3358 XEXP (op0
, 0), const0_rtx
);
3360 goto canonicalize_shift
;
3363 if (width
<= HOST_BITS_PER_WIDE_INT
3364 && mode_signbit_p (mode
, trueop1
)
3365 && ! side_effects_p (op0
))
3367 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3369 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3375 if (width
<= HOST_BITS_PER_WIDE_INT
3376 && CONST_INT_P (trueop1
)
3377 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3378 && ! side_effects_p (op0
))
3380 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3382 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3388 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3390 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3392 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3398 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3400 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3402 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3415 /* ??? There are simplifications that can be done. */
3419 if (!VECTOR_MODE_P (mode
))
3421 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3422 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3423 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3424 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3425 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3427 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3428 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
      /* Extract a scalar element from a nested VEC_SELECT expression
         (with optional nested VEC_CONCAT expression).  Some targets
         (i386) extract a scalar element from a vector using a chain of
         nested VEC_SELECT expressions.  When the input operand is a
         memory operand, this operation can be simplified to a simple
         scalar load from an offsetted memory address.  */
3437 if (GET_CODE (trueop0
) == VEC_SELECT
)
3439 rtx op0
= XEXP (trueop0
, 0);
3440 rtx op1
= XEXP (trueop0
, 1);
3442 enum machine_mode opmode
= GET_MODE (op0
);
3443 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3444 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3446 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3452 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3453 gcc_assert (i
< n_elts
);
3455 /* Select element, pointed by nested selector. */
3456 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3458 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3459 if (GET_CODE (op0
) == VEC_CONCAT
)
3461 rtx op00
= XEXP (op0
, 0);
3462 rtx op01
= XEXP (op0
, 1);
3464 enum machine_mode mode00
, mode01
;
3465 int n_elts00
, n_elts01
;
3467 mode00
= GET_MODE (op00
);
3468 mode01
= GET_MODE (op01
);
3470 /* Find out number of elements of each operand. */
3471 if (VECTOR_MODE_P (mode00
))
3473 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3474 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3479 if (VECTOR_MODE_P (mode01
))
3481 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3482 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3487 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3489 /* Select correct operand of VEC_CONCAT
3490 and adjust selector. */
3491 if (elem
< n_elts01
)
3502 vec
= rtvec_alloc (1);
3503 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3505 tmp
= gen_rtx_fmt_ee (code
, mode
,
3506 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3509 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3510 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3511 return XEXP (trueop0
, 0);
3515 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3516 gcc_assert (GET_MODE_INNER (mode
)
3517 == GET_MODE_INNER (GET_MODE (trueop0
)));
3518 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3520 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3522 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3523 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3524 rtvec v
= rtvec_alloc (n_elts
);
3527 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3528 for (i
= 0; i
< n_elts
; i
++)
3530 rtx x
= XVECEXP (trueop1
, 0, i
);
3532 gcc_assert (CONST_INT_P (x
));
3533 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3537 return gen_rtx_CONST_VECTOR (mode
, v
);
3540 /* Recognize the identity. */
3541 if (GET_MODE (trueop0
) == mode
)
3543 bool maybe_ident
= true;
3544 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3546 rtx j
= XVECEXP (trueop1
, 0, i
);
3547 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3549 maybe_ident
= false;
3557 /* If we build {a,b} then permute it, build the result directly. */
3558 if (XVECLEN (trueop1
, 0) == 2
3559 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3560 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3561 && GET_CODE (trueop0
) == VEC_CONCAT
3562 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3563 && GET_MODE (XEXP (trueop0
, 0)) == mode
3564 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3565 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3567 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3568 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3571 gcc_assert (i0
< 4 && i1
< 4);
3572 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3573 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3575 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3578 if (XVECLEN (trueop1
, 0) == 2
3579 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3580 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3581 && GET_CODE (trueop0
) == VEC_CONCAT
3582 && GET_MODE (trueop0
) == mode
)
3584 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3585 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3588 gcc_assert (i0
< 2 && i1
< 2);
3589 subop0
= XEXP (trueop0
, i0
);
3590 subop1
= XEXP (trueop0
, i1
);
3592 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3596 if (XVECLEN (trueop1
, 0) == 1
3597 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3598 && GET_CODE (trueop0
) == VEC_CONCAT
)
3601 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3603 /* Try to find the element in the VEC_CONCAT. */
3604 while (GET_MODE (vec
) != mode
3605 && GET_CODE (vec
) == VEC_CONCAT
)
3607 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3608 if (offset
< vec_size
)
3609 vec
= XEXP (vec
, 0);
3613 vec
= XEXP (vec
, 1);
3615 vec
= avoid_constant_pool_reference (vec
);
3618 if (GET_MODE (vec
) == mode
)
3622 /* If we select elements in a vec_merge that all come from the same
3623 operand, select from that operand directly. */
3624 if (GET_CODE (op0
) == VEC_MERGE
)
3626 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3627 if (CONST_INT_P (trueop02
))
3629 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3630 bool all_operand0
= true;
3631 bool all_operand1
= true;
3632 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3634 rtx j
= XVECEXP (trueop1
, 0, i
);
3635 if (sel
& (1 << UINTVAL (j
)))
3636 all_operand1
= false;
3638 all_operand0
= false;
3640 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3641 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3642 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3643 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3650 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3651 ? GET_MODE (trueop0
)
3652 : GET_MODE_INNER (mode
));
3653 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3654 ? GET_MODE (trueop1
)
3655 : GET_MODE_INNER (mode
));
3657 gcc_assert (VECTOR_MODE_P (mode
));
3658 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3659 == GET_MODE_SIZE (mode
));
3661 if (VECTOR_MODE_P (op0_mode
))
3662 gcc_assert (GET_MODE_INNER (mode
)
3663 == GET_MODE_INNER (op0_mode
));
3665 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3667 if (VECTOR_MODE_P (op1_mode
))
3668 gcc_assert (GET_MODE_INNER (mode
)
3669 == GET_MODE_INNER (op1_mode
));
3671 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3673 if ((GET_CODE (trueop0
) == CONST_VECTOR
3674 || CONST_SCALAR_INT_P (trueop0
)
3675 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3676 && (GET_CODE (trueop1
) == CONST_VECTOR
3677 || CONST_SCALAR_INT_P (trueop1
)
3678 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3680 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3681 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3682 rtvec v
= rtvec_alloc (n_elts
);
3684 unsigned in_n_elts
= 1;
3686 if (VECTOR_MODE_P (op0_mode
))
3687 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3688 for (i
= 0; i
< n_elts
; i
++)
3692 if (!VECTOR_MODE_P (op0_mode
))
3693 RTVEC_ELT (v
, i
) = trueop0
;
3695 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3699 if (!VECTOR_MODE_P (op1_mode
))
3700 RTVEC_ELT (v
, i
) = trueop1
;
3702 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3707 return gen_rtx_CONST_VECTOR (mode
, v
);
3710 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3711 Restrict the transformation to avoid generating a VEC_SELECT with a
3712 mode unrelated to its operand. */
3713 if (GET_CODE (trueop0
) == VEC_SELECT
3714 && GET_CODE (trueop1
) == VEC_SELECT
3715 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3716 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3718 rtx par0
= XEXP (trueop0
, 1);
3719 rtx par1
= XEXP (trueop1
, 1);
3720 int len0
= XVECLEN (par0
, 0);
3721 int len1
= XVECLEN (par1
, 0);
3722 rtvec vec
= rtvec_alloc (len0
+ len1
);
3723 for (int i
= 0; i
< len0
; i
++)
3724 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3725 for (int i
= 0; i
< len1
; i
++)
3726 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3727 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3728 gen_rtx_PARALLEL (VOIDmode
, vec
));
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);
3748 if (VECTOR_MODE_P (mode
)
3749 && code
!= VEC_CONCAT
3750 && GET_CODE (op0
) == CONST_VECTOR
3751 && GET_CODE (op1
) == CONST_VECTOR
)
3753 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3754 enum machine_mode op0mode
= GET_MODE (op0
);
3755 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3756 enum machine_mode op1mode
= GET_MODE (op1
);
3757 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3758 rtvec v
= rtvec_alloc (n_elts
);
3761 gcc_assert (op0_n_elts
== n_elts
);
3762 gcc_assert (op1_n_elts
== n_elts
);
3763 for (i
= 0; i
< n_elts
; i
++)
3765 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3766 CONST_VECTOR_ELT (op0
, i
),
3767 CONST_VECTOR_ELT (op1
, i
));
3770 RTVEC_ELT (v
, i
) = x
;
3773 return gen_rtx_CONST_VECTOR (mode
, v
);
3776 if (VECTOR_MODE_P (mode
)
3777 && code
== VEC_CONCAT
3778 && (CONST_SCALAR_INT_P (op0
)
3779 || GET_CODE (op0
) == CONST_FIXED
3780 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3781 && (CONST_SCALAR_INT_P (op1
)
3782 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3783 || GET_CODE (op1
) == CONST_FIXED
))
3785 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3786 rtvec v
= rtvec_alloc (n_elts
);
3788 gcc_assert (n_elts
>= 2);
3791 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3792 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3794 RTVEC_ELT (v
, 0) = op0
;
3795 RTVEC_ELT (v
, 1) = op1
;
3799 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3800 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3803 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3804 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3805 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3807 for (i
= 0; i
< op0_n_elts
; ++i
)
3808 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3809 for (i
= 0; i
< op1_n_elts
; ++i
)
3810 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3813 return gen_rtx_CONST_VECTOR (mode
, v
);
3816 if (SCALAR_FLOAT_MODE_P (mode
)
3817 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3818 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3819 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3830 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3832 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3834 for (i
= 0; i
< 4; i
++)
3851 real_from_target (&r
, tmp0
, mode
);
3852 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3856 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3859 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3860 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3861 real_convert (&f0
, mode
, &f0
);
3862 real_convert (&f1
, mode
, &f1
);
3864 if (HONOR_SNANS (mode
)
3865 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3869 && REAL_VALUES_EQUAL (f1
, dconst0
)
3870 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3873 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3874 && flag_trapping_math
3875 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3877 int s0
= REAL_VALUE_NEGATIVE (f0
);
3878 int s1
= REAL_VALUE_NEGATIVE (f1
);
3883 /* Inf + -Inf = NaN plus exception. */
3888 /* Inf - Inf = NaN plus exception. */
3893 /* Inf / Inf = NaN plus exception. */
3900 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3901 && flag_trapping_math
3902 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3903 || (REAL_VALUE_ISINF (f1
)
3904 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3905 /* Inf * 0 = NaN plus exception. */
3908 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3910 real_convert (&result
, mode
, &value
);
3912 /* Don't constant fold this floating point operation if
3913 the result has overflowed and flag_trapping_math. */
3915 if (flag_trapping_math
3916 && MODE_HAS_INFINITIES (mode
)
3917 && REAL_VALUE_ISINF (result
)
3918 && !REAL_VALUE_ISINF (f0
)
3919 && !REAL_VALUE_ISINF (f1
))
3920 /* Overflow plus exception. */
      /* Don't constant fold this floating point operation if the
         result may depend upon the run-time rounding mode and
         flag_rounding_math is set, or if GCC's software emulation
         is unable to accurately represent the result.  */
3928 if ((flag_rounding_math
3929 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3930 && (inexact
|| !real_identical (&result
, &value
)))
3933 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3937 /* We can fold some multi-word operations. */
3938 if (GET_MODE_CLASS (mode
) == MODE_INT
3939 && width
== HOST_BITS_PER_DOUBLE_INT
3940 && (CONST_DOUBLE_AS_INT_P (op0
) || CONST_INT_P (op0
))
3941 && (CONST_DOUBLE_AS_INT_P (op1
) || CONST_INT_P (op1
)))
3943 double_int o0
, o1
, res
, tmp
;
3946 o0
= rtx_to_double_int (op0
);
3947 o1
= rtx_to_double_int (op1
);
3952 /* A - B == A + (-B). */
3955 /* Fall through.... */
3966 res
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3973 tmp
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3980 res
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
3987 tmp
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
4021 case LSHIFTRT
: case ASHIFTRT
:
4023 case ROTATE
: case ROTATERT
:
4025 unsigned HOST_WIDE_INT cnt
;
4027 if (SHIFT_COUNT_TRUNCATED
)
4030 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
4033 if (!o1
.fits_uhwi ()
4034 || o1
.to_uhwi () >= GET_MODE_PRECISION (mode
))
4037 cnt
= o1
.to_uhwi ();
4038 unsigned short prec
= GET_MODE_PRECISION (mode
);
4040 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
4041 res
= o0
.rshift (cnt
, prec
, code
== ASHIFTRT
);
4042 else if (code
== ASHIFT
)
4043 res
= o0
.alshift (cnt
, prec
);
4044 else if (code
== ROTATE
)
4045 res
= o0
.lrotate (cnt
, prec
);
4046 else /* code == ROTATERT */
4047 res
= o0
.rrotate (cnt
, prec
);
4055 return immed_double_int_const (res
, mode
);
4058 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
4059 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
4061 /* Get the integer argument values in two forms:
4062 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4064 arg0
= INTVAL (op0
);
4065 arg1
= INTVAL (op1
);
4067 if (width
< HOST_BITS_PER_WIDE_INT
)
4069 arg0
&= GET_MODE_MASK (mode
);
4070 arg1
&= GET_MODE_MASK (mode
);
4073 if (val_signbit_known_set_p (mode
, arg0s
))
4074 arg0s
|= ~GET_MODE_MASK (mode
);
4077 if (val_signbit_known_set_p (mode
, arg1s
))
4078 arg1s
|= ~GET_MODE_MASK (mode
);
4086 /* Compute the value of the arithmetic. */
4091 val
= arg0s
+ arg1s
;
4095 val
= arg0s
- arg1s
;
4099 val
= arg0s
* arg1s
;
4104 || ((unsigned HOST_WIDE_INT
) arg0s
4105 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4108 val
= arg0s
/ arg1s
;
4113 || ((unsigned HOST_WIDE_INT
) arg0s
4114 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4117 val
= arg0s
% arg1s
;
4122 || ((unsigned HOST_WIDE_INT
) arg0s
4123 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4126 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
4131 || ((unsigned HOST_WIDE_INT
) arg0s
4132 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4135 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
4153 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4154 the value is in range. We can't return any old value for
4155 out-of-range arguments because either the middle-end (via
4156 shift_truncation_mask) or the back-end might be relying on
4157 target-specific knowledge. Nor can we rely on
4158 shift_truncation_mask, since the shift might not be part of an
4159 ashlM3, lshrM3 or ashrM3 instruction. */
4160 if (SHIFT_COUNT_TRUNCATED
)
4161 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
4162 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
4165 val
= (code
== ASHIFT
4166 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
4167 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
4169 /* Sign-extend the result for arithmetic right shifts. */
4170 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
4171 val
|= ((unsigned HOST_WIDE_INT
) (-1)) << (width
- arg1
);
4179 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
4180 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
4188 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
4189 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
4193 /* Do nothing here. */
4197 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
4201 val
= ((unsigned HOST_WIDE_INT
) arg0
4202 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4206 val
= arg0s
> arg1s
? arg0s
: arg1s
;
4210 val
= ((unsigned HOST_WIDE_INT
) arg0
4211 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4224 /* ??? There are simplifications that can be done. */
4231 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
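/* For example, given code == PLUS with op0 = (minus a b) and op1 =
   (plus b (const_int 4)), the operands are expanded into the flat list
   {+a, -b, +b, +4}; the pairwise pass below can then cancel -b against +b
   and the result is rebuilt as (plus a (const_int 4)).  */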
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};
4253 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4257 result
= (commutative_operand_precedence (y
)
4258 - commutative_operand_precedence (x
));
4262 /* Group together equal REGs to do more simplification. */
4263 if (REG_P (x
) && REG_P (y
))
4264 return REGNO (x
) > REGNO (y
);
4270 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
4273 struct simplify_plus_minus_op_data ops
[8];
4275 int n_ops
= 2, input_ops
= 2;
4276 int changed
, n_constants
= 0, canonicalized
= 0;
4279 memset (ops
, 0, sizeof ops
);
4281 /* Set up the two operands and then expand them until nothing has been
4282 changed. If we run out of room in our array, give up; this should
4283 almost never happen. */
4288 ops
[1].neg
= (code
== MINUS
);
  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);
  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;

	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}
      /* This is only useful the first time through.  */
      if (!canonicalized)
	return NULL_RTX;

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }
      /* If nothing changed, fail.  */
      if (!changed)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
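
/* Illustrative example (an assumption about typical inputs, not from the
   original sources): simplify_plus_minus (MINUS, SImode,
   (plus:SI (reg:SI 60) (const_int 8)), (plus:SI (reg:SI 60) (const_int 3)))
   expands both operands, cancels the matching (reg:SI 60) entries, combines
   the constants, and yields (const_int 5).  */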
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
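
/* For example (illustrative, not from the original comment): (plus a b),
   (minus a b) and (const (plus (symbol_ref "x") (const_int 4))) all satisfy
   plus_minus_operand_p, while a bare REG or CONST_INT does not.  */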
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
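
/* Illustrative call (an assumption about how passes typically use this
   entry point, not from the original sources): asking
   simplify_relational_operation (EQ, SImode, SImode, x, x) for a
   side-effect-free X returns the target's "true" value, because the
   constant-folding path above recognizes that X == X always holds.  */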
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && (CONST_INT_P (op1) || CONST_DOUBLE_AS_INT_P (op1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
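
/* Illustrative example (not from the original sources): the PLUS
   canonicalization above turns (geu:SI (plus:SI a (const_int -4))
   (const_int -4)) into (ltu:SI a (const_int 4)), i.e. the borrow test on
   "a - 4" becomes a direct unsigned comparison of a with 4.  */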
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
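
/* Illustrative example (not from the original sources): a caller that has
   determined CMP_LT | CMP_LTU for a pair of constants gets const_true_rtx
   back from comparison_result (LE, ...) and const0_rtx back from
   comparison_result (GTU, ...).  */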
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (CONST_DOUBLE_AS_INT_P (trueop1))
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= GET_MODE_MASK (mode);
	  l1u &= GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l0s))
	    l0s |= ~GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l1s))
	    l1s |= ~GET_MODE_MASK (mode);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr;
	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
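
/* Illustrative example (not from the original sources): with two CONST_INT
   operands, e.g. simplify_const_relational_operation (GTU, SImode,
   GEN_INT (4), GEN_INT (7)), the integer branch above computes
   CMP_LT | CMP_LTU and comparison_result then yields const0_rtx; the same
   operands with LTU would yield const_true_rtx.  */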
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications are possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;
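
/* Illustrative example (not from the original sources): for the FMA case
   above, a request such as (fma:DF (neg:DF a) (neg:DF b) c) has both
   negations stripped and is rebuilt as an FMA of a, b and c; for
   IF_THEN_ELSE, (if_then_else (const_int 1) a b) folds directly to a.  */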
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
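
/* Illustrative example (not from the original sources): on a little-endian
   target, simplify_immed_subreg (QImode, GEN_INT (0x12345678), SImode, 0)
   unpacks the constant into the byte array 78 56 34 12, selects byte 0 and
   repacks it, yielding (const_int 0x78); byte 3 would yield (const_int 0x12).  */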
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grog partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
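
/* Illustrative example (not from the original sources):
   simplify_subreg (SImode, (subreg:DI (reg:TI 100) 8), DImode, 0) folds the
   two mode changes into one and returns (subreg:SI (reg:TI 100) 8), while a
   subreg of a CONST_INT is handed off to simplify_immed_subreg above.  */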
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
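
/* Typical use (an illustration, not from the original sources): a caller
   that wants a word-sized piece of a DImode value writes
   simplify_gen_subreg (SImode, operands[1], DImode, 0); if the operand is a
   constant the call folds it outright, otherwise it returns a fresh
   (subreg:SI ...) when that subreg is valid, and NULL_RTX when it is not.  */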
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))