/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  This macro yields the high word that corresponds
   to sign-extending LOW: all-ones if LOW is negative, zero otherwise.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
48 static rtx
neg_const_int (enum machine_mode
, const_rtx
);
49 static bool plus_minus_operand_p (const_rtx
);
50 static bool simplify_plus_minus_op_data_cmp (rtx
, rtx
);
51 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
52 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
54 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
56 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
57 enum machine_mode
, rtx
, rtx
);
58 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
59 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
65 neg_const_int (enum machine_mode mode
, const_rtx i
)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT
) INTVAL (i
), mode
);
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
74 mode_signbit_p (enum machine_mode mode
, const_rtx x
)
76 unsigned HOST_WIDE_INT val
;
79 if (GET_MODE_CLASS (mode
) != MODE_INT
)
82 width
= GET_MODE_PRECISION (mode
);
86 if (width
<= HOST_BITS_PER_WIDE_INT
89 else if (width
<= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x
)
91 && CONST_DOUBLE_LOW (x
) == 0)
93 val
= CONST_DOUBLE_HIGH (x
);
94 width
-= HOST_BITS_PER_WIDE_INT
;
97 /* FIXME: We don't yet have a representation for wider modes. */
100 if (width
< HOST_BITS_PER_WIDE_INT
)
101 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
102 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
110 val_signbit_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
114 if (GET_MODE_CLASS (mode
) != MODE_INT
)
117 width
= GET_MODE_PRECISION (mode
);
118 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
121 val
&= GET_MODE_MASK (mode
);
122 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
128 val_signbit_known_set_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
132 if (GET_MODE_CLASS (mode
) != MODE_INT
)
135 width
= GET_MODE_PRECISION (mode
);
136 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
139 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
146 val_signbit_known_clear_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
150 if (GET_MODE_CLASS (mode
) != MODE_INT
)
153 width
= GET_MODE_PRECISION (mode
);
154 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
157 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
170 /* If this simplifies, do it. */
171 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0
, op1
))
178 tem
= op0
, op0
= op1
, op1
= tem
;
180 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x
)
189 enum machine_mode cmode
;
190 HOST_WIDE_INT offset
= 0;
192 switch (GET_CODE (x
))
198 /* Handle float extensions of constant pool references. */
200 c
= avoid_constant_pool_reference (tmp
);
201 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
205 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
214 if (GET_MODE (x
) == BLKmode
)
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr
= targetm
.delegitimize_address (addr
);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr
) == CONST
224 && GET_CODE (XEXP (addr
, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))
227 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
228 addr
= XEXP (XEXP (addr
, 0), 0);
231 if (GET_CODE (addr
) == LO_SUM
)
232 addr
= XEXP (addr
, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr
) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr
))
239 c
= get_pool_constant (addr
);
240 cmode
= get_pool_mode (addr
);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if ((offset
!= 0 || cmode
!= GET_MODE (x
))
246 && offset
>= 0 && offset
< GET_MODE_SIZE (cmode
))
248 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
249 if (tem
&& CONSTANT_P (tem
))
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
264 delegitimize_mem_from_attrs (rtx x
)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
270 && MEM_OFFSET_KNOWN_P (x
))
272 tree decl
= MEM_EXPR (x
);
273 enum machine_mode mode
= GET_MODE (x
);
274 HOST_WIDE_INT offset
= 0;
276 switch (TREE_CODE (decl
))
286 case ARRAY_RANGE_REF
:
291 case VIEW_CONVERT_EXPR
:
293 HOST_WIDE_INT bitsize
, bitpos
;
295 int unsignedp
, volatilep
= 0;
297 decl
= get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
,
298 &mode
, &unsignedp
, &volatilep
, false);
299 if (bitsize
!= GET_MODE_BITSIZE (mode
)
300 || (bitpos
% BITS_PER_UNIT
)
301 || (toffset
&& !host_integerp (toffset
, 0)))
305 offset
+= bitpos
/ BITS_PER_UNIT
;
307 offset
+= TREE_INT_CST_LOW (toffset
);
314 && mode
== GET_MODE (x
)
315 && TREE_CODE (decl
) == VAR_DECL
316 && (TREE_STATIC (decl
)
317 || DECL_THREAD_LOCAL_P (decl
))
318 && DECL_RTL_SET_P (decl
)
319 && MEM_P (DECL_RTL (decl
)))
323 offset
+= MEM_OFFSET (x
);
325 newx
= DECL_RTL (decl
);
329 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
331 /* Avoid creating a new MEM needlessly if we already had
332 the same address. We do if there's no OFFSET and the
333 old address X is identical to NEWX, or if X is of the
334 form (plus NEWX OFFSET), or the NEWX is of the form
335 (plus Y (const_int Z)) and X is that with the offset
336 added: (plus Y (const_int Z+OFFSET)). */
338 || (GET_CODE (o
) == PLUS
339 && GET_CODE (XEXP (o
, 1)) == CONST_INT
340 && (offset
== INTVAL (XEXP (o
, 1))
341 || (GET_CODE (n
) == PLUS
342 && GET_CODE (XEXP (n
, 1)) == CONST_INT
343 && (INTVAL (XEXP (n
, 1)) + offset
344 == INTVAL (XEXP (o
, 1)))
345 && (n
= XEXP (n
, 0))))
346 && (o
= XEXP (o
, 0))))
347 && rtx_equal_p (o
, n
)))
348 x
= adjust_address_nv (newx
, mode
, offset
);
350 else if (GET_MODE (x
) == GET_MODE (newx
)
359 /* Make a unary operation by first seeing if it folds and otherwise making
360 the specified operation. */
363 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
364 enum machine_mode op_mode
)
368 /* If this simplifies, use it. */
369 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
372 return gen_rtx_fmt_e (code
, mode
, op
);
375 /* Likewise for ternary operations. */
378 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
379 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
383 /* If this simplifies, use it. */
384 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
388 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
391 /* Likewise, for relational operations.
392 CMP_MODE specifies mode comparison is done in. */
395 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
396 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
400 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
404 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
407 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
408 and simplify the result. If FN is non-NULL, call this callback on each
409 X, if it returns non-NULL, replace X with its return value and simplify the
413 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
414 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
416 enum rtx_code code
= GET_CODE (x
);
417 enum machine_mode mode
= GET_MODE (x
);
418 enum machine_mode op_mode
;
420 rtx op0
, op1
, op2
, newx
, op
;
424 if (__builtin_expect (fn
!= NULL
, 0))
426 newx
= fn (x
, old_rtx
, data
);
430 else if (rtx_equal_p (x
, old_rtx
))
431 return copy_rtx ((rtx
) data
);
433 switch (GET_RTX_CLASS (code
))
437 op_mode
= GET_MODE (op0
);
438 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
439 if (op0
== XEXP (x
, 0))
441 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
445 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
446 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
447 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
449 return simplify_gen_binary (code
, mode
, op0
, op1
);
452 case RTX_COMM_COMPARE
:
455 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
456 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
457 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
458 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
460 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
463 case RTX_BITFIELD_OPS
:
465 op_mode
= GET_MODE (op0
);
466 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
467 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
468 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
469 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
471 if (op_mode
== VOIDmode
)
472 op_mode
= GET_MODE (op0
);
473 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
478 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
479 if (op0
== SUBREG_REG (x
))
481 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
482 GET_MODE (SUBREG_REG (x
)),
484 return op0
? op0
: x
;
491 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
492 if (op0
== XEXP (x
, 0))
494 return replace_equiv_address_nv (x
, op0
);
496 else if (code
== LO_SUM
)
498 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
499 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
501 /* (lo_sum (high x) x) -> x */
502 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
505 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
507 return gen_rtx_LO_SUM (mode
, op0
, op1
);
516 fmt
= GET_RTX_FORMAT (code
);
517 for (i
= 0; fmt
[i
]; i
++)
522 newvec
= XVEC (newx
, i
);
523 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
525 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
527 if (op
!= RTVEC_ELT (vec
, j
))
531 newvec
= shallow_copy_rtvec (vec
);
533 newx
= shallow_copy_rtx (x
);
534 XVEC (newx
, i
) = newvec
;
536 RTVEC_ELT (newvec
, j
) = op
;
544 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
545 if (op
!= XEXP (x
, i
))
548 newx
= shallow_copy_rtx (x
);
557 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
558 resulting RTX. Return a new RTX which is as simplified as possible. */
561 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
563 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
566 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
567 Only handle cases where the truncated value is inherently an rvalue.
569 RTL provides two ways of truncating a value:
571 1. a lowpart subreg. This form is only a truncation when both
572 the outer and inner modes (here MODE and OP_MODE respectively)
573 are scalar integers, and only then when the subreg is used as
576 It is only valid to form such truncating subregs if the
577 truncation requires no action by the target. The onus for
578 proving this is on the creator of the subreg -- e.g. the
579 caller to simplify_subreg or simplify_gen_subreg -- and typically
580 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
582 2. a TRUNCATE. This form handles both scalar and compound integers.
584 The first form is preferred where valid. However, the TRUNCATE
585 handling in simplify_unary_operation turns the second form into the
586 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
587 so it is generally safe to form rvalue truncations using:
589 simplify_gen_unary (TRUNCATE, ...)
591 and leave simplify_unary_operation to work out which representation
594 Because of the proof requirements on (1), simplify_truncation must
595 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
596 regardless of whether the outer truncation came from a SUBREG or a
597 TRUNCATE. For example, if the caller has proven that an SImode
602 is a no-op and can be represented as a subreg, it does not follow
603 that SImode truncations of X and Y are also no-ops. On a target
604 like 64-bit MIPS that requires SImode values to be stored in
605 sign-extended form, an SImode truncation of:
607 (and:DI (reg:DI X) (const_int 63))
609 is trivially a no-op because only the lower 6 bits can be set.
610 However, X is still an arbitrary 64-bit number and so we cannot
611 assume that truncating it too is a no-op. */
614 simplify_truncation (enum machine_mode mode
, rtx op
,
615 enum machine_mode op_mode
)
617 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
618 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
619 gcc_assert (precision
<= op_precision
);
621 /* Optimize truncations of zero and sign extended values. */
622 if (GET_CODE (op
) == ZERO_EXTEND
623 || GET_CODE (op
) == SIGN_EXTEND
)
625 /* There are three possibilities. If MODE is the same as the
626 origmode, we can omit both the extension and the subreg.
627 If MODE is not larger than the origmode, we can apply the
628 truncation without the extension. Finally, if the outermode
629 is larger than the origmode, we can just extend to the appropriate
631 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
632 if (mode
== origmode
)
634 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
635 return simplify_gen_unary (TRUNCATE
, mode
,
636 XEXP (op
, 0), origmode
);
638 return simplify_gen_unary (GET_CODE (op
), mode
,
639 XEXP (op
, 0), origmode
);
642 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
643 to (op:SI (truncate:SI (x:DI)) (truncate:SI (x:DI))). */
644 if (GET_CODE (op
) == PLUS
645 || GET_CODE (op
) == MINUS
646 || GET_CODE (op
) == MULT
)
648 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
651 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
653 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
657 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
658 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
659 the outer subreg is effectively a truncation to the original mode. */
660 if ((GET_CODE (op
) == LSHIFTRT
661 || GET_CODE (op
) == ASHIFTRT
)
662 /* Ensure that OP_MODE is at least twice as wide as MODE
663 to avoid the possibility that an outer LSHIFTRT shifts by more
664 than the sign extension's sign_bit_copies and introduces zeros
665 into the high bits of the result. */
666 && 2 * precision
<= op_precision
667 && CONST_INT_P (XEXP (op
, 1))
668 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
669 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
670 && UINTVAL (XEXP (op
, 1)) < precision
)
671 return simplify_gen_binary (ASHIFTRT
, mode
,
672 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
674 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
675 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
676 the outer subreg is effectively a truncation to the original mode. */
677 if ((GET_CODE (op
) == LSHIFTRT
678 || GET_CODE (op
) == ASHIFTRT
)
679 && CONST_INT_P (XEXP (op
, 1))
680 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
681 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
682 && UINTVAL (XEXP (op
, 1)) < precision
)
683 return simplify_gen_binary (LSHIFTRT
, mode
,
684 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
686 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
687 to (ashift:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if (GET_CODE (op
) == ASHIFT
690 && CONST_INT_P (XEXP (op
, 1))
691 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
692 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
693 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
694 && UINTVAL (XEXP (op
, 1)) < precision
)
695 return simplify_gen_binary (ASHIFT
, mode
,
696 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
698 /* Recognize a word extraction from a multi-word subreg. */
699 if ((GET_CODE (op
) == LSHIFTRT
700 || GET_CODE (op
) == ASHIFTRT
)
701 && SCALAR_INT_MODE_P (mode
)
702 && SCALAR_INT_MODE_P (op_mode
)
703 && precision
>= BITS_PER_WORD
704 && 2 * precision
<= op_precision
705 && CONST_INT_P (XEXP (op
, 1))
706 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
707 && UINTVAL (XEXP (op
, 1)) < op_precision
)
709 int byte
= subreg_lowpart_offset (mode
, op_mode
);
710 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
711 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
713 ? byte
- shifted_bytes
714 : byte
+ shifted_bytes
));
717 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
718 and try replacing the TRUNCATE and shift with it. Don't do this
719 if the MEM has a mode-dependent address. */
720 if ((GET_CODE (op
) == LSHIFTRT
721 || GET_CODE (op
) == ASHIFTRT
)
722 && SCALAR_INT_MODE_P (op_mode
)
723 && MEM_P (XEXP (op
, 0))
724 && CONST_INT_P (XEXP (op
, 1))
725 && (INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (mode
)) == 0
726 && INTVAL (XEXP (op
, 1)) > 0
727 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (op_mode
)
728 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
729 MEM_ADDR_SPACE (XEXP (op
, 0)))
730 && ! MEM_VOLATILE_P (XEXP (op
, 0))
731 && (GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
732 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
734 int byte
= subreg_lowpart_offset (mode
, op_mode
);
735 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
736 return adjust_address_nv (XEXP (op
, 0), mode
,
738 ? byte
- shifted_bytes
739 : byte
+ shifted_bytes
));
742 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
743 (OP:SI foo:SI) if OP is NEG or ABS. */
744 if ((GET_CODE (op
) == ABS
745 || GET_CODE (op
) == NEG
)
746 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
747 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
748 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
749 return simplify_gen_unary (GET_CODE (op
), mode
,
750 XEXP (XEXP (op
, 0), 0), mode
);
752 /* (truncate:A (subreg:B (truncate:C X) 0)) is
754 if (GET_CODE (op
) == SUBREG
755 && SCALAR_INT_MODE_P (mode
)
756 && SCALAR_INT_MODE_P (op_mode
)
757 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op
)))
758 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
759 && subreg_lowpart_p (op
))
760 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
761 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
763 /* (truncate:A (truncate:B X)) is (truncate:A X). */
764 if (GET_CODE (op
) == TRUNCATE
)
765 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
766 GET_MODE (XEXP (op
, 0)));
771 /* Try to simplify a unary operation CODE whose output mode is to be
772 MODE with input operand OP whose mode was originally OP_MODE.
773 Return zero if no simplification can be made. */
775 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
776 rtx op
, enum machine_mode op_mode
)
780 trueop
= avoid_constant_pool_reference (op
);
782 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
786 return simplify_unary_operation_1 (code
, mode
, op
);
789 /* Perform some simplifications we can do even if the operands
792 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
794 enum rtx_code reversed
;
800 /* (not (not X)) == X. */
801 if (GET_CODE (op
) == NOT
)
804 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
805 comparison is all ones. */
806 if (COMPARISON_P (op
)
807 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
808 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
809 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
810 XEXP (op
, 0), XEXP (op
, 1));
812 /* (not (plus X -1)) can become (neg X). */
813 if (GET_CODE (op
) == PLUS
814 && XEXP (op
, 1) == constm1_rtx
)
815 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
817 /* Similarly, (not (neg X)) is (plus X -1). */
818 if (GET_CODE (op
) == NEG
)
819 return plus_constant (mode
, XEXP (op
, 0), -1);
821 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
822 if (GET_CODE (op
) == XOR
823 && CONST_INT_P (XEXP (op
, 1))
824 && (temp
= simplify_unary_operation (NOT
, mode
,
825 XEXP (op
, 1), mode
)) != 0)
826 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
828 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
829 if (GET_CODE (op
) == PLUS
830 && CONST_INT_P (XEXP (op
, 1))
831 && mode_signbit_p (mode
, XEXP (op
, 1))
832 && (temp
= simplify_unary_operation (NOT
, mode
,
833 XEXP (op
, 1), mode
)) != 0)
834 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
837 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
838 operands other than 1, but that is not valid. We could do a
839 similar simplification for (not (lshiftrt C X)) where C is
840 just the sign bit, but this doesn't seem common enough to
842 if (GET_CODE (op
) == ASHIFT
843 && XEXP (op
, 0) == const1_rtx
)
845 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
846 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
849 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
850 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
851 so we can perform the above simplification. */
853 if (STORE_FLAG_VALUE
== -1
854 && GET_CODE (op
) == ASHIFTRT
855 && GET_CODE (XEXP (op
, 1))
856 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
857 return simplify_gen_relational (GE
, mode
, VOIDmode
,
858 XEXP (op
, 0), const0_rtx
);
861 if (GET_CODE (op
) == SUBREG
862 && subreg_lowpart_p (op
)
863 && (GET_MODE_SIZE (GET_MODE (op
))
864 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
865 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
866 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
868 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
871 x
= gen_rtx_ROTATE (inner_mode
,
872 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
874 XEXP (SUBREG_REG (op
), 1));
875 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
880 /* Apply De Morgan's laws to reduce number of patterns for machines
881 with negating logical insns (and-not, nand, etc.). If result has
882 only one NOT, put it first, since that is how the patterns are
885 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
887 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
888 enum machine_mode op_mode
;
890 op_mode
= GET_MODE (in1
);
891 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
893 op_mode
= GET_MODE (in2
);
894 if (op_mode
== VOIDmode
)
896 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
898 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
901 in2
= in1
; in1
= tem
;
904 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
910 /* (neg (neg X)) == X. */
911 if (GET_CODE (op
) == NEG
)
914 /* (neg (plus X 1)) can become (not X). */
915 if (GET_CODE (op
) == PLUS
916 && XEXP (op
, 1) == const1_rtx
)
917 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
919 /* Similarly, (neg (not X)) is (plus X 1). */
920 if (GET_CODE (op
) == NOT
)
921 return plus_constant (mode
, XEXP (op
, 0), 1);
923 /* (neg (minus X Y)) can become (minus Y X). This transformation
924 isn't safe for modes with signed zeros, since if X and Y are
925 both +0, (minus Y X) is the same as (minus X Y). If the
926 rounding mode is towards +infinity (or -infinity) then the two
927 expressions will be rounded differently. */
928 if (GET_CODE (op
) == MINUS
929 && !HONOR_SIGNED_ZEROS (mode
)
930 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
931 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
933 if (GET_CODE (op
) == PLUS
934 && !HONOR_SIGNED_ZEROS (mode
)
935 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
937 /* (neg (plus A C)) is simplified to (minus -C A). */
938 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
939 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
941 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
943 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
946 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
947 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
948 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
951 /* (neg (mult A B)) becomes (mult A (neg B)).
952 This works even for floating-point values. */
953 if (GET_CODE (op
) == MULT
954 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
956 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
957 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
960 /* NEG commutes with ASHIFT since it is multiplication. Only do
961 this if we can then eliminate the NEG (e.g., if the operand
963 if (GET_CODE (op
) == ASHIFT
)
965 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
967 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
970 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
971 C is equal to the width of MODE minus 1. */
972 if (GET_CODE (op
) == ASHIFTRT
973 && CONST_INT_P (XEXP (op
, 1))
974 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
975 return simplify_gen_binary (LSHIFTRT
, mode
,
976 XEXP (op
, 0), XEXP (op
, 1));
978 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
979 C is equal to the width of MODE minus 1. */
980 if (GET_CODE (op
) == LSHIFTRT
981 && CONST_INT_P (XEXP (op
, 1))
982 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
983 return simplify_gen_binary (ASHIFTRT
, mode
,
984 XEXP (op
, 0), XEXP (op
, 1));
986 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
987 if (GET_CODE (op
) == XOR
988 && XEXP (op
, 1) == const1_rtx
989 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
990 return plus_constant (mode
, XEXP (op
, 0), -1);
992 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
993 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
994 if (GET_CODE (op
) == LT
995 && XEXP (op
, 1) == const0_rtx
996 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
998 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
999 int isize
= GET_MODE_PRECISION (inner
);
1000 if (STORE_FLAG_VALUE
== 1)
1002 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1003 GEN_INT (isize
- 1));
1006 if (GET_MODE_PRECISION (mode
) > isize
)
1007 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1008 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1010 else if (STORE_FLAG_VALUE
== -1)
1012 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1013 GEN_INT (isize
- 1));
1016 if (GET_MODE_PRECISION (mode
) > isize
)
1017 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1018 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1024 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1025 with the umulXi3_highpart patterns. */
1026 if (GET_CODE (op
) == LSHIFTRT
1027 && GET_CODE (XEXP (op
, 0)) == MULT
)
1030 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1032 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1034 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1038 /* We can't handle truncation to a partial integer mode here
1039 because we don't know the real bitsize of the partial
1044 if (GET_MODE (op
) != VOIDmode
)
1046 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1051 /* If we know that the value is already truncated, we can
1052 replace the TRUNCATE with a SUBREG. */
1053 if (GET_MODE_NUNITS (mode
) == 1
1054 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1055 || truncated_to_mode (mode
, op
)))
1057 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1062 /* A truncate of a comparison can be replaced with a subreg if
1063 STORE_FLAG_VALUE permits. This is like the previous test,
1064 but it works even if the comparison is done in a mode larger
1065 than HOST_BITS_PER_WIDE_INT. */
1066 if (HWI_COMPUTABLE_MODE_P (mode
)
1067 && COMPARISON_P (op
)
1068 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1070 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1075 /* A truncate of a memory is just loading the low part of the memory
1076 if we are not changing the meaning of the address. */
1077 if (GET_CODE (op
) == MEM
1078 && !VECTOR_MODE_P (mode
)
1079 && !MEM_VOLATILE_P (op
)
1080 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1082 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1089 case FLOAT_TRUNCATE
:
1090 if (DECIMAL_FLOAT_MODE_P (mode
))
1093 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1094 if (GET_CODE (op
) == FLOAT_EXTEND
1095 && GET_MODE (XEXP (op
, 0)) == mode
)
1096 return XEXP (op
, 0);
1098 /* (float_truncate:SF (float_truncate:DF foo:XF))
1099 = (float_truncate:SF foo:XF).
1100 This may eliminate double rounding, so it is unsafe.
1102 (float_truncate:SF (float_extend:XF foo:DF))
1103 = (float_truncate:SF foo:DF).
1105 (float_truncate:DF (float_extend:XF foo:SF))
1106 = (float_extend:SF foo:DF). */
1107 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1108 && flag_unsafe_math_optimizations
)
1109 || GET_CODE (op
) == FLOAT_EXTEND
)
1110 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1112 > GET_MODE_SIZE (mode
)
1113 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1115 XEXP (op
, 0), mode
);
1117 /* (float_truncate (float x)) is (float x) */
1118 if (GET_CODE (op
) == FLOAT
1119 && (flag_unsafe_math_optimizations
1120 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1121 && ((unsigned)significand_size (GET_MODE (op
))
1122 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1123 - num_sign_bit_copies (XEXP (op
, 0),
1124 GET_MODE (XEXP (op
, 0))))))))
1125 return simplify_gen_unary (FLOAT
, mode
,
1127 GET_MODE (XEXP (op
, 0)));
1129 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1130 (OP:SF foo:SF) if OP is NEG or ABS. */
1131 if ((GET_CODE (op
) == ABS
1132 || GET_CODE (op
) == NEG
)
1133 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1134 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1135 return simplify_gen_unary (GET_CODE (op
), mode
,
1136 XEXP (XEXP (op
, 0), 0), mode
);
1138 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1139 is (float_truncate:SF x). */
1140 if (GET_CODE (op
) == SUBREG
1141 && subreg_lowpart_p (op
)
1142 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1143 return SUBREG_REG (op
);
1147 if (DECIMAL_FLOAT_MODE_P (mode
))
1150 /* (float_extend (float_extend x)) is (float_extend x)
1152 (float_extend (float x)) is (float x) assuming that double
1153 rounding can't happen.
1155 if (GET_CODE (op
) == FLOAT_EXTEND
1156 || (GET_CODE (op
) == FLOAT
1157 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1158 && ((unsigned)significand_size (GET_MODE (op
))
1159 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1160 - num_sign_bit_copies (XEXP (op
, 0),
1161 GET_MODE (XEXP (op
, 0)))))))
1162 return simplify_gen_unary (GET_CODE (op
), mode
,
1164 GET_MODE (XEXP (op
, 0)));
1169 /* (abs (neg <foo>)) -> (abs <foo>) */
1170 if (GET_CODE (op
) == NEG
)
1171 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1172 GET_MODE (XEXP (op
, 0)));
1174 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1176 if (GET_MODE (op
) == VOIDmode
)
1179 /* If operand is something known to be positive, ignore the ABS. */
1180 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1181 || val_signbit_known_clear_p (GET_MODE (op
),
1182 nonzero_bits (op
, GET_MODE (op
))))
1185 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1186 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1187 return gen_rtx_NEG (mode
, op
);
1192 /* (ffs (*_extend <X>)) = (ffs <X>) */
1193 if (GET_CODE (op
) == SIGN_EXTEND
1194 || GET_CODE (op
) == ZERO_EXTEND
)
1195 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1196 GET_MODE (XEXP (op
, 0)));
1200 switch (GET_CODE (op
))
1204 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1205 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1206 GET_MODE (XEXP (op
, 0)));
1210 /* Rotations don't affect popcount. */
1211 if (!side_effects_p (XEXP (op
, 1)))
1212 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1213 GET_MODE (XEXP (op
, 0)));
1222 switch (GET_CODE (op
))
1228 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1229 GET_MODE (XEXP (op
, 0)));
1233 /* Rotations don't affect parity. */
1234 if (!side_effects_p (XEXP (op
, 1)))
1235 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1236 GET_MODE (XEXP (op
, 0)));
1245 /* (bswap (bswap x)) -> x. */
1246 if (GET_CODE (op
) == BSWAP
)
1247 return XEXP (op
, 0);
1251 /* (float (sign_extend <X>)) = (float <X>). */
1252 if (GET_CODE (op
) == SIGN_EXTEND
)
1253 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1254 GET_MODE (XEXP (op
, 0)));
1258 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1259 becomes just the MINUS if its mode is MODE. This allows
1260 folding switch statements on machines using casesi (such as
1262 if (GET_CODE (op
) == TRUNCATE
1263 && GET_MODE (XEXP (op
, 0)) == mode
1264 && GET_CODE (XEXP (op
, 0)) == MINUS
1265 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1266 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1267 return XEXP (op
, 0);
1269 /* Extending a widening multiplication should be canonicalized to
1270 a wider widening multiplication. */
1271 if (GET_CODE (op
) == MULT
)
1273 rtx lhs
= XEXP (op
, 0);
1274 rtx rhs
= XEXP (op
, 1);
1275 enum rtx_code lcode
= GET_CODE (lhs
);
1276 enum rtx_code rcode
= GET_CODE (rhs
);
1278 /* Widening multiplies usually extend both operands, but sometimes
1279 they use a shift to extract a portion of a register. */
1280 if ((lcode
== SIGN_EXTEND
1281 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1282 && (rcode
== SIGN_EXTEND
1283 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1285 enum machine_mode lmode
= GET_MODE (lhs
);
1286 enum machine_mode rmode
= GET_MODE (rhs
);
1289 if (lcode
== ASHIFTRT
)
1290 /* Number of bits not shifted off the end. */
1291 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1292 else /* lcode == SIGN_EXTEND */
1293 /* Size of inner mode. */
1294 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1296 if (rcode
== ASHIFTRT
)
1297 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1298 else /* rcode == SIGN_EXTEND */
1299 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1301 /* We can only widen multiplies if the result is mathematiclly
1302 equivalent. I.e. if overflow was impossible. */
1303 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1304 return simplify_gen_binary
1306 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1307 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1311 /* Check for a sign extension of a subreg of a promoted
1312 variable, where the promotion is sign-extended, and the
1313 target mode is the same as the variable's promotion. */
1314 if (GET_CODE (op
) == SUBREG
1315 && SUBREG_PROMOTED_VAR_P (op
)
1316 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1317 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1319 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1324 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1325 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1326 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1328 gcc_assert (GET_MODE_BITSIZE (mode
)
1329 > GET_MODE_BITSIZE (GET_MODE (op
)));
1330 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1331 GET_MODE (XEXP (op
, 0)));
1334 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1335 is (sign_extend:M (subreg:O <X>)) if there is mode with
1336 GET_MODE_BITSIZE (N) - I bits.
1337 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1338 is similarly (zero_extend:M (subreg:O <X>)). */
1339 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1340 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1341 && CONST_INT_P (XEXP (op
, 1))
1342 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1343 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1345 enum machine_mode tmode
1346 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1347 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1348 gcc_assert (GET_MODE_BITSIZE (mode
)
1349 > GET_MODE_BITSIZE (GET_MODE (op
)));
1350 if (tmode
!= BLKmode
)
1353 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1355 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1356 ? SIGN_EXTEND
: ZERO_EXTEND
,
1357 mode
, inner
, tmode
);
1361 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1362 /* As we do not know which address space the pointer is referring to,
1363 we can do this only if the target does not support different pointer
1364 or address modes depending on the address space. */
1365 if (target_default_pointer_address_modes_p ()
1366 && ! POINTERS_EXTEND_UNSIGNED
1367 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1369 || (GET_CODE (op
) == SUBREG
1370 && REG_P (SUBREG_REG (op
))
1371 && REG_POINTER (SUBREG_REG (op
))
1372 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1373 return convert_memory_address (Pmode
, op
);
1378 /* Check for a zero extension of a subreg of a promoted
1379 variable, where the promotion is zero-extended, and the
1380 target mode is the same as the variable's promotion. */
1381 if (GET_CODE (op
) == SUBREG
1382 && SUBREG_PROMOTED_VAR_P (op
)
1383 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
1384 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1386 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1391 /* Extending a widening multiplication should be canonicalized to
1392 a wider widening multiplication. */
1393 if (GET_CODE (op
) == MULT
)
1395 rtx lhs
= XEXP (op
, 0);
1396 rtx rhs
= XEXP (op
, 1);
1397 enum rtx_code lcode
= GET_CODE (lhs
);
1398 enum rtx_code rcode
= GET_CODE (rhs
);
1400 /* Widening multiplies usually extend both operands, but sometimes
1401 they use a shift to extract a portion of a register. */
1402 if ((lcode
== ZERO_EXTEND
1403 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1404 && (rcode
== ZERO_EXTEND
1405 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1407 enum machine_mode lmode
= GET_MODE (lhs
);
1408 enum machine_mode rmode
= GET_MODE (rhs
);
1411 if (lcode
== LSHIFTRT
)
1412 /* Number of bits not shifted off the end. */
1413 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1414 else /* lcode == ZERO_EXTEND */
1415 /* Size of inner mode. */
1416 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1418 if (rcode
== LSHIFTRT
)
1419 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1420 else /* rcode == ZERO_EXTEND */
1421 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1423 /* We can only widen multiplies if the result is mathematiclly
1424 equivalent. I.e. if overflow was impossible. */
1425 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1426 return simplify_gen_binary
1428 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1429 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1433 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1434 if (GET_CODE (op
) == ZERO_EXTEND
)
1435 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1436 GET_MODE (XEXP (op
, 0)));
1438 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1439 is (zero_extend:M (subreg:O <X>)) if there is mode with
1440 GET_MODE_BITSIZE (N) - I bits. */
1441 if (GET_CODE (op
) == LSHIFTRT
1442 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1443 && CONST_INT_P (XEXP (op
, 1))
1444 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1445 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1447 enum machine_mode tmode
1448 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1449 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1450 if (tmode
!= BLKmode
)
1453 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1455 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1459 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1460 /* As we do not know which address space the pointer is referring to,
1461 we can do this only if the target does not support different pointer
1462 or address modes depending on the address space. */
1463 if (target_default_pointer_address_modes_p ()
1464 && POINTERS_EXTEND_UNSIGNED
> 0
1465 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1467 || (GET_CODE (op
) == SUBREG
1468 && REG_P (SUBREG_REG (op
))
1469 && REG_POINTER (SUBREG_REG (op
))
1470 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1471 return convert_memory_address (Pmode
, op
);
1482 /* Try to compute the value of a unary operation CODE whose output mode is to
1483 be MODE with input operand OP whose mode was originally OP_MODE.
1484 Return zero if the value cannot be computed. */
1486 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
1487 rtx op
, enum machine_mode op_mode
)
1489 unsigned int width
= GET_MODE_PRECISION (mode
);
1490 unsigned int op_width
= GET_MODE_PRECISION (op_mode
);
1492 if (code
== VEC_DUPLICATE
)
1494 gcc_assert (VECTOR_MODE_P (mode
));
1495 if (GET_MODE (op
) != VOIDmode
)
1497 if (!VECTOR_MODE_P (GET_MODE (op
)))
1498 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1500 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1503 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1504 || GET_CODE (op
) == CONST_VECTOR
)
1506 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1507 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1508 rtvec v
= rtvec_alloc (n_elts
);
1511 if (GET_CODE (op
) != CONST_VECTOR
)
1512 for (i
= 0; i
< n_elts
; i
++)
1513 RTVEC_ELT (v
, i
) = op
;
1516 enum machine_mode inmode
= GET_MODE (op
);
1517 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1518 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1520 gcc_assert (in_n_elts
< n_elts
);
1521 gcc_assert ((n_elts
% in_n_elts
) == 0);
1522 for (i
= 0; i
< n_elts
; i
++)
1523 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1525 return gen_rtx_CONST_VECTOR (mode
, v
);
1529 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1531 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1532 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1533 enum machine_mode opmode
= GET_MODE (op
);
1534 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1535 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1536 rtvec v
= rtvec_alloc (n_elts
);
1539 gcc_assert (op_n_elts
== n_elts
);
1540 for (i
= 0; i
< n_elts
; i
++)
1542 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1543 CONST_VECTOR_ELT (op
, i
),
1544 GET_MODE_INNER (opmode
));
1547 RTVEC_ELT (v
, i
) = x
;
1549 return gen_rtx_CONST_VECTOR (mode
, v
);
1552 /* The order of these tests is critical so that, for example, we don't
1553 check the wrong mode (input vs. output) for a conversion operation,
1554 such as FIX. At some point, this should be simplified. */
1556 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1558 HOST_WIDE_INT hv
, lv
;
1561 if (CONST_INT_P (op
))
1562 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1564 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1566 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1567 d
= real_value_truncate (mode
, d
);
1568 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1570 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1572 HOST_WIDE_INT hv
, lv
;
1575 if (CONST_INT_P (op
))
1576 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1578 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1580 if (op_mode
== VOIDmode
1581 || GET_MODE_PRECISION (op_mode
) > HOST_BITS_PER_DOUBLE_INT
)
1582 /* We should never get a negative number. */
1583 gcc_assert (hv
>= 0);
1584 else if (GET_MODE_PRECISION (op_mode
) <= HOST_BITS_PER_WIDE_INT
)
1585 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1587 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1588 d
= real_value_truncate (mode
, d
);
1589 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1592 if (CONST_INT_P (op
)
1593 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1595 HOST_WIDE_INT arg0
= INTVAL (op
);
1609 val
= (arg0
>= 0 ? arg0
: - arg0
);
1613 arg0
&= GET_MODE_MASK (mode
);
1614 val
= ffs_hwi (arg0
);
1618 arg0
&= GET_MODE_MASK (mode
);
1619 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1622 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 1;
1626 arg0
&= GET_MODE_MASK (mode
);
1628 val
= GET_MODE_PRECISION (mode
) - 1;
1630 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 2;
1632 val
= GET_MODE_PRECISION (mode
) - floor_log2 (~arg0
) - 2;
1636 arg0
&= GET_MODE_MASK (mode
);
1639 /* Even if the value at zero is undefined, we have to come
1640 up with some replacement. Seems good enough. */
1641 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1642 val
= GET_MODE_PRECISION (mode
);
1645 val
= ctz_hwi (arg0
);
1649 arg0
&= GET_MODE_MASK (mode
);
1652 val
++, arg0
&= arg0
- 1;
1656 arg0
&= GET_MODE_MASK (mode
);
1659 val
++, arg0
&= arg0
- 1;
1668 for (s
= 0; s
< width
; s
+= 8)
1670 unsigned int d
= width
- s
- 8;
1671 unsigned HOST_WIDE_INT byte
;
1672 byte
= (arg0
>> s
) & 0xff;
1683 /* When zero-extending a CONST_INT, we need to know its
1685 gcc_assert (op_mode
!= VOIDmode
);
1686 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1688 /* If we were really extending the mode,
1689 we would have to distinguish between zero-extension
1690 and sign-extension. */
1691 gcc_assert (width
== op_width
);
1694 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1695 val
= arg0
& GET_MODE_MASK (op_mode
);
1701 if (op_mode
== VOIDmode
)
1703 op_width
= GET_MODE_PRECISION (op_mode
);
1704 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1706 /* If we were really extending the mode,
1707 we would have to distinguish between zero-extension
1708 and sign-extension. */
1709 gcc_assert (width
== op_width
);
1712 else if (op_width
< HOST_BITS_PER_WIDE_INT
)
1714 val
= arg0
& GET_MODE_MASK (op_mode
);
1715 if (val_signbit_known_set_p (op_mode
, val
))
1716 val
|= ~GET_MODE_MASK (op_mode
);
1724 case FLOAT_TRUNCATE
:
1736 return gen_int_mode (val
, mode
);
1739 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1740 for a DImode operation on a CONST_INT. */
1741 else if (width
<= HOST_BITS_PER_DOUBLE_INT
1742 && (CONST_DOUBLE_AS_INT_P (op
) || CONST_INT_P (op
)))
1744 double_int first
, value
;
1746 if (CONST_DOUBLE_AS_INT_P (op
))
1747 first
= double_int::from_pair (CONST_DOUBLE_HIGH (op
),
1748 CONST_DOUBLE_LOW (op
));
1750 first
= double_int::from_shwi (INTVAL (op
));
1763 if (first
.is_negative ())
1772 value
.low
= ffs_hwi (first
.low
);
1773 else if (first
.high
!= 0)
1774 value
.low
= HOST_BITS_PER_WIDE_INT
+ ffs_hwi (first
.high
);
1781 if (first
.high
!= 0)
1782 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.high
) - 1
1783 - HOST_BITS_PER_WIDE_INT
;
1784 else if (first
.low
!= 0)
1785 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.low
) - 1;
1786 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1787 value
.low
= GET_MODE_PRECISION (mode
);
1793 value
.low
= ctz_hwi (first
.low
);
1794 else if (first
.high
!= 0)
1795 value
.low
= HOST_BITS_PER_WIDE_INT
+ ctz_hwi (first
.high
);
1796 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1797 value
.low
= GET_MODE_PRECISION (mode
);
1801 value
= double_int_zero
;
1805 first
.low
&= first
.low
- 1;
1810 first
.high
&= first
.high
- 1;
1815 value
= double_int_zero
;
1819 first
.low
&= first
.low
- 1;
1824 first
.high
&= first
.high
- 1;
1833 value
= double_int_zero
;
1834 for (s
= 0; s
< width
; s
+= 8)
1836 unsigned int d
= width
- s
- 8;
1837 unsigned HOST_WIDE_INT byte
;
1839 if (s
< HOST_BITS_PER_WIDE_INT
)
1840 byte
= (first
.low
>> s
) & 0xff;
1842 byte
= (first
.high
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1844 if (d
< HOST_BITS_PER_WIDE_INT
)
1845 value
.low
|= byte
<< d
;
1847 value
.high
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1853 /* This is just a change-of-mode, so do nothing. */
1858 gcc_assert (op_mode
!= VOIDmode
);
1860 if (op_width
> HOST_BITS_PER_WIDE_INT
)
1863 value
= double_int::from_uhwi (first
.low
& GET_MODE_MASK (op_mode
));
1867 if (op_mode
== VOIDmode
1868 || op_width
> HOST_BITS_PER_WIDE_INT
)
1872 value
.low
= first
.low
& GET_MODE_MASK (op_mode
);
1873 if (val_signbit_known_set_p (op_mode
, value
.low
))
1874 value
.low
|= ~GET_MODE_MASK (op_mode
);
1876 value
.high
= HWI_SIGN_EXTEND (value
.low
);
1887 return immed_double_int_const (value
, mode
);
1890 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1891 && SCALAR_FLOAT_MODE_P (mode
)
1892 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1894 REAL_VALUE_TYPE d
, t
;
1895 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1900 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1902 real_sqrt (&t
, mode
, &d
);
1906 d
= real_value_abs (&d
);
1909 d
= real_value_negate (&d
);
1911 case FLOAT_TRUNCATE
:
1912 d
= real_value_truncate (mode
, d
);
1915 /* All this does is change the mode, unless changing
1917 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1918 real_convert (&d
, mode
, &d
);
1921 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1928 real_to_target (tmp
, &d
, GET_MODE (op
));
1929 for (i
= 0; i
< 4; i
++)
1931 real_from_target (&d
, tmp
, mode
);
1937 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1940 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1941 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1942 && GET_MODE_CLASS (mode
) == MODE_INT
1943 && width
<= HOST_BITS_PER_DOUBLE_INT
&& width
> 0)
1945 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1946 operators are intentionally left unspecified (to ease implementation
1947 by target backends), for consistency, this routine implements the
1948 same semantics for constant folding as used by the middle-end. */
1950 /* This was formerly used only for non-IEEE float.
1951 eggert@twinsun.com says it is safe for IEEE also. */
1952 HOST_WIDE_INT xh
, xl
, th
, tl
;
1953 REAL_VALUE_TYPE x
, t
;
1954 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1958 if (REAL_VALUE_ISNAN (x
))
1961 /* Test against the signed upper bound. */
1962 if (width
> HOST_BITS_PER_WIDE_INT
)
1964 th
= ((unsigned HOST_WIDE_INT
) 1
1965 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1971 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1973 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1974 if (REAL_VALUES_LESS (t
, x
))
1981 /* Test against the signed lower bound. */
1982 if (width
> HOST_BITS_PER_WIDE_INT
)
1984 th
= (unsigned HOST_WIDE_INT
) (-1)
1985 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1991 tl
= (unsigned HOST_WIDE_INT
) (-1) << (width
- 1);
1993 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1994 if (REAL_VALUES_LESS (x
, t
))
2000 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2004 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
2007 /* Test against the unsigned upper bound. */
2008 if (width
== HOST_BITS_PER_DOUBLE_INT
)
2013 else if (width
>= HOST_BITS_PER_WIDE_INT
)
2015 th
= ((unsigned HOST_WIDE_INT
) 1
2016 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
2022 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
2024 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
2025 if (REAL_VALUES_LESS (t
, x
))
2032 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2038 return immed_double_const (xl
, xh
, mode
);
2044 /* Subroutine of simplify_binary_operation to simplify a commutative,
2045 associative binary operation CODE with result mode MODE, operating
2046 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2047 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2048 canonicalization is possible. */
2051 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
2056 /* Linearize the operator to the left. */
2057 if (GET_CODE (op1
) == code
)
2059 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2060 if (GET_CODE (op0
) == code
)
2062 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
2063 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
2066 /* "a op (b op c)" becomes "(b op c) op a". */
2067 if (! swap_commutative_operands_p (op1
, op0
))
2068 return simplify_gen_binary (code
, mode
, op1
, op0
);
2075 if (GET_CODE (op0
) == code
)
2077 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2078 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
2080 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
2081 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2084 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2085 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
2087 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
2089 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2090 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
2092 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2099 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2100 and OP1. Return 0 if no simplification is possible.
2102 Don't use this for relational operations such as EQ or LT.
2103 Use simplify_relational_operation instead. */
2105 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2108 rtx trueop0
, trueop1
;
2111 /* Relational operations don't work here. We must know the mode
2112 of the operands in order to do the comparison correctly.
2113 Assuming a full word can give incorrect results.
2114 Consider comparing 128 with -128 in QImode. */
2115 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
2116 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
2118 /* Make sure the constant is second. */
2119 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
2120 && swap_commutative_operands_p (op0
, op1
))
2122 tem
= op0
, op0
= op1
, op1
= tem
;
2125 trueop0
= avoid_constant_pool_reference (op0
);
2126 trueop1
= avoid_constant_pool_reference (op1
);
2128 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
2131 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
2134 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2135 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2136 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2137 actual constants. */
2140 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
2141 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
2143 rtx tem
, reversed
, opleft
, opright
;
2145 unsigned int width
= GET_MODE_PRECISION (mode
);
2147 /* Even if we can't compute a constant result,
2148 there are some cases worth simplifying. */
2153 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2154 when x is NaN, infinite, or finite and nonzero. They aren't
2155 when x is -0 and the rounding mode is not towards -infinity,
2156 since (-0) + 0 is then 0. */
2157 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
2160 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2161 transformations are safe even for IEEE. */
2162 if (GET_CODE (op0
) == NEG
)
2163 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2164 else if (GET_CODE (op1
) == NEG
)
2165 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2167 /* (~a) + 1 -> -a */
2168 if (INTEGRAL_MODE_P (mode
)
2169 && GET_CODE (op0
) == NOT
2170 && trueop1
== const1_rtx
)
2171 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2173 /* Handle both-operands-constant cases. We can only add
2174 CONST_INTs to constants since the sum of relocatable symbols
2175 can't be handled by most assemblers. Don't add CONST_INT
2176 to CONST_INT since overflow won't be computed properly if wider
2177 than HOST_BITS_PER_WIDE_INT. */
2179 if ((GET_CODE (op0
) == CONST
2180 || GET_CODE (op0
) == SYMBOL_REF
2181 || GET_CODE (op0
) == LABEL_REF
)
2182 && CONST_INT_P (op1
))
2183 return plus_constant (mode
, op0
, INTVAL (op1
));
2184 else if ((GET_CODE (op1
) == CONST
2185 || GET_CODE (op1
) == SYMBOL_REF
2186 || GET_CODE (op1
) == LABEL_REF
)
2187 && CONST_INT_P (op0
))
2188 return plus_constant (mode
, op1
, INTVAL (op0
));
2190 /* See if this is something like X * C - X or vice versa or
2191 if the multiplication is written as a shift. If so, we can
2192 distribute and make a new multiply, shift, or maybe just
2193 have X (if C is 2 in the example above). But don't make
2194 something more expensive than we had before. */
2196 if (SCALAR_INT_MODE_P (mode
))
2198 double_int coeff0
, coeff1
;
2199 rtx lhs
= op0
, rhs
= op1
;
2201 coeff0
= double_int_one
;
2202 coeff1
= double_int_one
;
2204 if (GET_CODE (lhs
) == NEG
)
2206 coeff0
= double_int_minus_one
;
2207 lhs
= XEXP (lhs
, 0);
2209 else if (GET_CODE (lhs
) == MULT
2210 && CONST_INT_P (XEXP (lhs
, 1)))
2212 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2213 lhs
= XEXP (lhs
, 0);
2215 else if (GET_CODE (lhs
) == ASHIFT
2216 && CONST_INT_P (XEXP (lhs
, 1))
2217 && INTVAL (XEXP (lhs
, 1)) >= 0
2218 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2220 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2221 lhs
= XEXP (lhs
, 0);
2224 if (GET_CODE (rhs
) == NEG
)
2226 coeff1
= double_int_minus_one
;
2227 rhs
= XEXP (rhs
, 0);
2229 else if (GET_CODE (rhs
) == MULT
2230 && CONST_INT_P (XEXP (rhs
, 1)))
2232 coeff1
= double_int::from_shwi (INTVAL (XEXP (rhs
, 1)));
2233 rhs
= XEXP (rhs
, 0);
2235 else if (GET_CODE (rhs
) == ASHIFT
2236 && CONST_INT_P (XEXP (rhs
, 1))
2237 && INTVAL (XEXP (rhs
, 1)) >= 0
2238 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2240 coeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2241 rhs
= XEXP (rhs
, 0);
2244 if (rtx_equal_p (lhs
, rhs
))
2246 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2249 bool speed
= optimize_function_for_speed_p (cfun
);
2251 val
= coeff0
+ coeff1
;
2252 coeff
= immed_double_int_const (val
, mode
);
2254 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2255 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2260 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2261 if (CONST_SCALAR_INT_P (op1
)
2262 && GET_CODE (op0
) == XOR
2263 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2264 && mode_signbit_p (mode
, op1
))
2265 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2266 simplify_gen_binary (XOR
, mode
, op1
,
2269 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2270 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2271 && GET_CODE (op0
) == MULT
2272 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2276 in1
= XEXP (XEXP (op0
, 0), 0);
2277 in2
= XEXP (op0
, 1);
2278 return simplify_gen_binary (MINUS
, mode
, op1
,
2279 simplify_gen_binary (MULT
, mode
,
2283 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2284 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2286 if (COMPARISON_P (op0
)
2287 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2288 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2289 && (reversed
= reversed_comparison (op0
, mode
)))
2291 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2293 /* If one of the operands is a PLUS or a MINUS, see if we can
2294 simplify this by the associative law.
2295 Don't use the associative law for floating point.
2296 The inaccuracy makes it nonassociative,
2297 and subtle programs can break if operations are associated. */
2299 if (INTEGRAL_MODE_P (mode
)
2300 && (plus_minus_operand_p (op0
)
2301 || plus_minus_operand_p (op1
))
2302 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2305 /* Reassociate floating point addition only when the user
2306 specifies associative math operations. */
2307 if (FLOAT_MODE_P (mode
)
2308 && flag_associative_math
)
2310 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2317 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2318 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2319 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2320 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2322 rtx xop00
= XEXP (op0
, 0);
2323 rtx xop10
= XEXP (op1
, 0);
2326 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2328 if (REG_P (xop00
) && REG_P (xop10
)
2329 && GET_MODE (xop00
) == GET_MODE (xop10
)
2330 && REGNO (xop00
) == REGNO (xop10
)
2331 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2332 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2339 /* We can't assume x-x is 0 even with non-IEEE floating point,
2340 but since it is zero except in very strange circumstances, we
2341 will treat it as zero with -ffinite-math-only. */
2342 if (rtx_equal_p (trueop0
, trueop1
)
2343 && ! side_effects_p (op0
)
2344 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2345 return CONST0_RTX (mode
);
2347 /* Change subtraction from zero into negation. (0 - x) is the
2348 same as -x when x is NaN, infinite, or finite and nonzero.
2349 But if the mode has signed zeros, and does not round towards
2350 -infinity, then 0 - 0 is 0, not -0. */
2351 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2352 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2354 /* (-1 - a) is ~a. */
2355 if (trueop0
== constm1_rtx
)
2356 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2358 /* Subtracting 0 has no effect unless the mode has signed zeros
2359 and supports rounding towards -infinity. In such a case,
2361 if (!(HONOR_SIGNED_ZEROS (mode
)
2362 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2363 && trueop1
== CONST0_RTX (mode
))
2366 /* See if this is something like X * C - X or vice versa or
2367 if the multiplication is written as a shift. If so, we can
2368 distribute and make a new multiply, shift, or maybe just
2369 have X (if C is 2 in the example above). But don't make
2370 something more expensive than we had before. */
2372 if (SCALAR_INT_MODE_P (mode
))
2374 double_int coeff0
, negcoeff1
;
2375 rtx lhs
= op0
, rhs
= op1
;
2377 coeff0
= double_int_one
;
2378 negcoeff1
= double_int_minus_one
;
2380 if (GET_CODE (lhs
) == NEG
)
2382 coeff0
= double_int_minus_one
;
2383 lhs
= XEXP (lhs
, 0);
2385 else if (GET_CODE (lhs
) == MULT
2386 && CONST_INT_P (XEXP (lhs
, 1)))
2388 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2389 lhs
= XEXP (lhs
, 0);
2391 else if (GET_CODE (lhs
) == ASHIFT
2392 && CONST_INT_P (XEXP (lhs
, 1))
2393 && INTVAL (XEXP (lhs
, 1)) >= 0
2394 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2396 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2397 lhs
= XEXP (lhs
, 0);
2400 if (GET_CODE (rhs
) == NEG
)
2402 negcoeff1
= double_int_one
;
2403 rhs
= XEXP (rhs
, 0);
2405 else if (GET_CODE (rhs
) == MULT
2406 && CONST_INT_P (XEXP (rhs
, 1)))
2408 negcoeff1
= double_int::from_shwi (-INTVAL (XEXP (rhs
, 1)));
2409 rhs
= XEXP (rhs
, 0);
2411 else if (GET_CODE (rhs
) == ASHIFT
2412 && CONST_INT_P (XEXP (rhs
, 1))
2413 && INTVAL (XEXP (rhs
, 1)) >= 0
2414 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2416 negcoeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2417 negcoeff1
= -negcoeff1
;
2418 rhs
= XEXP (rhs
, 0);
2421 if (rtx_equal_p (lhs
, rhs
))
2423 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2426 bool speed
= optimize_function_for_speed_p (cfun
);
2428 val
= coeff0
+ negcoeff1
;
2429 coeff
= immed_double_int_const (val
, mode
);
2431 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2432 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2437 /* (a - (-b)) -> (a + b). True even for IEEE. */
2438 if (GET_CODE (op1
) == NEG
)
2439 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2441 /* (-x - c) may be simplified as (-c - x). */
2442 if (GET_CODE (op0
) == NEG
2443 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2445 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2447 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2450 /* Don't let a relocatable value get a negative coeff. */
2451 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2452 return simplify_gen_binary (PLUS
, mode
,
2454 neg_const_int (mode
, op1
));
2456 /* (x - (x & y)) -> (x & ~y) */
2457 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2459 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2461 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2462 GET_MODE (XEXP (op1
, 1)));
2463 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2465 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2467 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2468 GET_MODE (XEXP (op1
, 0)));
2469 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2473 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2474 by reversing the comparison code if valid. */
2475 if (STORE_FLAG_VALUE
== 1
2476 && trueop0
== const1_rtx
2477 && COMPARISON_P (op1
)
2478 && (reversed
= reversed_comparison (op1
, mode
)))
2481 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2482 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2483 && GET_CODE (op1
) == MULT
2484 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2488 in1
= XEXP (XEXP (op1
, 0), 0);
2489 in2
= XEXP (op1
, 1);
2490 return simplify_gen_binary (PLUS
, mode
,
2491 simplify_gen_binary (MULT
, mode
,
2496 /* Canonicalize (minus (neg A) (mult B C)) to
2497 (minus (mult (neg B) C) A). */
2498 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2499 && GET_CODE (op1
) == MULT
2500 && GET_CODE (op0
) == NEG
)
2504 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2505 in2
= XEXP (op1
, 1);
2506 return simplify_gen_binary (MINUS
, mode
,
2507 simplify_gen_binary (MULT
, mode
,
2512 /* If one of the operands is a PLUS or a MINUS, see if we can
2513 simplify this by the associative law. This will, for example,
2514 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2515 Don't use the associative law for floating point.
2516 The inaccuracy makes it nonassociative,
2517 and subtle programs can break if operations are associated. */
2519 if (INTEGRAL_MODE_P (mode
)
2520 && (plus_minus_operand_p (op0
)
2521 || plus_minus_operand_p (op1
))
2522 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2527 if (trueop1
== constm1_rtx
)
2528 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2530 if (GET_CODE (op0
) == NEG
)
2532 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2533 /* If op1 is a MULT as well and simplify_unary_operation
2534 just moved the NEG to the second operand, simplify_gen_binary
2535 below could through simplify_associative_operation move
2536 the NEG around again and recurse endlessly. */
2538 && GET_CODE (op1
) == MULT
2539 && GET_CODE (temp
) == MULT
2540 && XEXP (op1
, 0) == XEXP (temp
, 0)
2541 && GET_CODE (XEXP (temp
, 1)) == NEG
2542 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2545 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2547 if (GET_CODE (op1
) == NEG
)
2549 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2550 /* If op0 is a MULT as well and simplify_unary_operation
2551 just moved the NEG to the second operand, simplify_gen_binary
2552 below could through simplify_associative_operation move
2553 the NEG around again and recurse endlessly. */
2555 && GET_CODE (op0
) == MULT
2556 && GET_CODE (temp
) == MULT
2557 && XEXP (op0
, 0) == XEXP (temp
, 0)
2558 && GET_CODE (XEXP (temp
, 1)) == NEG
2559 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2562 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2565 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2566 x is NaN, since x * 0 is then also NaN. Nor is it valid
2567 when the mode has signed zeros, since multiplying a negative
2568 number by 0 will give -0, not 0. */
2569 if (!HONOR_NANS (mode
)
2570 && !HONOR_SIGNED_ZEROS (mode
)
2571 && trueop1
== CONST0_RTX (mode
)
2572 && ! side_effects_p (op0
))
2575 /* In IEEE floating point, x*1 is not equivalent to x for
2577 if (!HONOR_SNANS (mode
)
2578 && trueop1
== CONST1_RTX (mode
))
2581 /* Convert multiply by constant power of two into shift unless
2582 we are still generating RTL. This test is a kludge. */
2583 if (CONST_INT_P (trueop1
)
2584 && (val
= exact_log2 (UINTVAL (trueop1
))) >= 0
2585 /* If the mode is larger than the host word size, and the
2586 uppermost bit is set, then this isn't a power of two due
2587 to implicit sign extension. */
2588 && (width
<= HOST_BITS_PER_WIDE_INT
2589 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
2590 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2592 /* Likewise for multipliers wider than a word. */
2593 if (CONST_DOUBLE_AS_INT_P (trueop1
)
2594 && GET_MODE (op0
) == mode
2595 && CONST_DOUBLE_LOW (trueop1
) == 0
2596 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0
2597 && (val
< HOST_BITS_PER_DOUBLE_INT
- 1
2598 || GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_DOUBLE_INT
))
2599 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2600 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2602 /* x*2 is x+x and x*(-1) is -x */
2603 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2604 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2605 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2606 && GET_MODE (op0
) == mode
)
2609 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2611 if (REAL_VALUES_EQUAL (d
, dconst2
))
2612 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2614 if (!HONOR_SNANS (mode
)
2615 && REAL_VALUES_EQUAL (d
, dconstm1
))
2616 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2619 /* Optimize -x * -x as x * x. */
2620 if (FLOAT_MODE_P (mode
)
2621 && GET_CODE (op0
) == NEG
2622 && GET_CODE (op1
) == NEG
2623 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2624 && !side_effects_p (XEXP (op0
, 0)))
2625 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2627 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2628 if (SCALAR_FLOAT_MODE_P (mode
)
2629 && GET_CODE (op0
) == ABS
2630 && GET_CODE (op1
) == ABS
2631 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2632 && !side_effects_p (XEXP (op0
, 0)))
2633 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2635 /* Reassociate multiplication, but for floating point MULTs
2636 only when the user specifies unsafe math optimizations. */
2637 if (! FLOAT_MODE_P (mode
)
2638 || flag_unsafe_math_optimizations
)
2640 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2647 if (trueop1
== CONST0_RTX (mode
))
2649 if (INTEGRAL_MODE_P (mode
)
2650 && trueop1
== CONSTM1_RTX (mode
)
2651 && !side_effects_p (op0
))
2653 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2655 /* A | (~A) -> -1 */
2656 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2657 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2658 && ! side_effects_p (op0
)
2659 && SCALAR_INT_MODE_P (mode
))
2662 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2663 if (CONST_INT_P (op1
)
2664 && HWI_COMPUTABLE_MODE_P (mode
)
2665 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2666 && !side_effects_p (op0
))
2669 /* Canonicalize (X & C1) | C2. */
2670 if (GET_CODE (op0
) == AND
2671 && CONST_INT_P (trueop1
)
2672 && CONST_INT_P (XEXP (op0
, 1)))
2674 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2675 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2676 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2678 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2680 && !side_effects_p (XEXP (op0
, 0)))
2683 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2684 if (((c1
|c2
) & mask
) == mask
)
2685 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2687 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2688 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2690 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2691 gen_int_mode (c1
& ~c2
, mode
));
2692 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2696 /* Convert (A & B) | A to A. */
2697 if (GET_CODE (op0
) == AND
2698 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2699 || rtx_equal_p (XEXP (op0
, 1), op1
))
2700 && ! side_effects_p (XEXP (op0
, 0))
2701 && ! side_effects_p (XEXP (op0
, 1)))
2704 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2705 mode size to (rotate A CX). */
2707 if (GET_CODE (op1
) == ASHIFT
2708 || GET_CODE (op1
) == SUBREG
)
2719 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2720 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2721 && CONST_INT_P (XEXP (opleft
, 1))
2722 && CONST_INT_P (XEXP (opright
, 1))
2723 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2724 == GET_MODE_PRECISION (mode
)))
2725 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2727 /* Same, but for ashift that has been "simplified" to a wider mode
2728 by simplify_shift_const. */
2730 if (GET_CODE (opleft
) == SUBREG
2731 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2732 && GET_CODE (opright
) == LSHIFTRT
2733 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2734 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2735 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2736 && (GET_MODE_SIZE (GET_MODE (opleft
))
2737 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2738 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2739 SUBREG_REG (XEXP (opright
, 0)))
2740 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2741 && CONST_INT_P (XEXP (opright
, 1))
2742 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2743 == GET_MODE_PRECISION (mode
)))
2744 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2745 XEXP (SUBREG_REG (opleft
), 1));
2747 /* If we have (ior (and (X C1) C2)), simplify this by making
2748 C1 as small as possible if C1 actually changes. */
2749 if (CONST_INT_P (op1
)
2750 && (HWI_COMPUTABLE_MODE_P (mode
)
2751 || INTVAL (op1
) > 0)
2752 && GET_CODE (op0
) == AND
2753 && CONST_INT_P (XEXP (op0
, 1))
2754 && CONST_INT_P (op1
)
2755 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2756 return simplify_gen_binary (IOR
, mode
,
2758 (AND
, mode
, XEXP (op0
, 0),
2759 GEN_INT (UINTVAL (XEXP (op0
, 1))
2763 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2764 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2765 the PLUS does not affect any of the bits in OP1: then we can do
2766 the IOR as a PLUS and we can associate. This is valid if OP1
2767 can be safely shifted left C bits. */
2768 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2769 && GET_CODE (XEXP (op0
, 0)) == PLUS
2770 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2771 && CONST_INT_P (XEXP (op0
, 1))
2772 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2774 int count
= INTVAL (XEXP (op0
, 1));
2775 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2777 if (mask
>> count
== INTVAL (trueop1
)
2778 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2779 return simplify_gen_binary (ASHIFTRT
, mode
,
2780 plus_constant (mode
, XEXP (op0
, 0),
2785 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2791 if (trueop1
== CONST0_RTX (mode
))
2793 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2794 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2795 if (rtx_equal_p (trueop0
, trueop1
)
2796 && ! side_effects_p (op0
)
2797 && GET_MODE_CLASS (mode
) != MODE_CC
)
2798 return CONST0_RTX (mode
);
2800 /* Canonicalize XOR of the most significant bit to PLUS. */
2801 if (CONST_SCALAR_INT_P (op1
)
2802 && mode_signbit_p (mode
, op1
))
2803 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2804 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2805 if (CONST_SCALAR_INT_P (op1
)
2806 && GET_CODE (op0
) == PLUS
2807 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2808 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2809 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2810 simplify_gen_binary (XOR
, mode
, op1
,
2813 /* If we are XORing two things that have no bits in common,
2814 convert them into an IOR. This helps to detect rotation encoded
2815 using those methods and possibly other simplifications. */
2817 if (HWI_COMPUTABLE_MODE_P (mode
)
2818 && (nonzero_bits (op0
, mode
)
2819 & nonzero_bits (op1
, mode
)) == 0)
2820 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2822 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2823 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2826 int num_negated
= 0;
2828 if (GET_CODE (op0
) == NOT
)
2829 num_negated
++, op0
= XEXP (op0
, 0);
2830 if (GET_CODE (op1
) == NOT
)
2831 num_negated
++, op1
= XEXP (op1
, 0);
2833 if (num_negated
== 2)
2834 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2835 else if (num_negated
== 1)
2836 return simplify_gen_unary (NOT
, mode
,
2837 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2841 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2842 correspond to a machine insn or result in further simplifications
2843 if B is a constant. */
2845 if (GET_CODE (op0
) == AND
2846 && rtx_equal_p (XEXP (op0
, 1), op1
)
2847 && ! side_effects_p (op1
))
2848 return simplify_gen_binary (AND
, mode
,
2849 simplify_gen_unary (NOT
, mode
,
2850 XEXP (op0
, 0), mode
),
2853 else if (GET_CODE (op0
) == AND
2854 && rtx_equal_p (XEXP (op0
, 0), op1
)
2855 && ! side_effects_p (op1
))
2856 return simplify_gen_binary (AND
, mode
,
2857 simplify_gen_unary (NOT
, mode
,
2858 XEXP (op0
, 1), mode
),
2861 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2862 we can transform like this:
2863 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2864 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2865 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2866 Attempt a few simplifications when B and C are both constants. */
2867 if (GET_CODE (op0
) == AND
2868 && CONST_INT_P (op1
)
2869 && CONST_INT_P (XEXP (op0
, 1)))
2871 rtx a
= XEXP (op0
, 0);
2872 rtx b
= XEXP (op0
, 1);
2874 HOST_WIDE_INT bval
= INTVAL (b
);
2875 HOST_WIDE_INT cval
= INTVAL (c
);
2878 = simplify_binary_operation (AND
, mode
,
2879 simplify_gen_unary (NOT
, mode
, a
, mode
),
2881 if ((~cval
& bval
) == 0)
2883 /* Try to simplify ~A&C | ~B&C. */
2884 if (na_c
!= NULL_RTX
)
2885 return simplify_gen_binary (IOR
, mode
, na_c
,
2886 GEN_INT (~bval
& cval
));
2890 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2891 if (na_c
== const0_rtx
)
2893 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2894 GEN_INT (~cval
& bval
));
2895 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2896 GEN_INT (~bval
& cval
));
2901 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2902 comparison if STORE_FLAG_VALUE is 1. */
2903 if (STORE_FLAG_VALUE
== 1
2904 && trueop1
== const1_rtx
2905 && COMPARISON_P (op0
)
2906 && (reversed
= reversed_comparison (op0
, mode
)))
2909 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2910 is (lt foo (const_int 0)), so we can perform the above
2911 simplification if STORE_FLAG_VALUE is 1. */
2913 if (STORE_FLAG_VALUE
== 1
2914 && trueop1
== const1_rtx
2915 && GET_CODE (op0
) == LSHIFTRT
2916 && CONST_INT_P (XEXP (op0
, 1))
2917 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2918 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2920 /* (xor (comparison foo bar) (const_int sign-bit))
2921 when STORE_FLAG_VALUE is the sign bit. */
2922 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2923 && trueop1
== const_true_rtx
2924 && COMPARISON_P (op0
)
2925 && (reversed
= reversed_comparison (op0
, mode
)))
2928 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2934 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2936 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2938 if (HWI_COMPUTABLE_MODE_P (mode
))
2940 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2941 HOST_WIDE_INT nzop1
;
2942 if (CONST_INT_P (trueop1
))
2944 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2945 /* If we are turning off bits already known off in OP0, we need
2947 if ((nzop0
& ~val1
) == 0)
2950 nzop1
= nonzero_bits (trueop1
, mode
);
2951 /* If we are clearing all the nonzero bits, the result is zero. */
2952 if ((nzop1
& nzop0
) == 0
2953 && !side_effects_p (op0
) && !side_effects_p (op1
))
2954 return CONST0_RTX (mode
);
2956 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2957 && GET_MODE_CLASS (mode
) != MODE_CC
)
2960 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2961 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2962 && ! side_effects_p (op0
)
2963 && GET_MODE_CLASS (mode
) != MODE_CC
)
2964 return CONST0_RTX (mode
);
2966 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2967 there are no nonzero bits of C outside of X's mode. */
2968 if ((GET_CODE (op0
) == SIGN_EXTEND
2969 || GET_CODE (op0
) == ZERO_EXTEND
)
2970 && CONST_INT_P (trueop1
)
2971 && HWI_COMPUTABLE_MODE_P (mode
)
2972 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2973 & UINTVAL (trueop1
)) == 0)
2975 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2976 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2977 gen_int_mode (INTVAL (trueop1
),
2979 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2982 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2983 we might be able to further simplify the AND with X and potentially
2984 remove the truncation altogether. */
2985 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2987 rtx x
= XEXP (op0
, 0);
2988 enum machine_mode xmode
= GET_MODE (x
);
2989 tem
= simplify_gen_binary (AND
, xmode
, x
,
2990 gen_int_mode (INTVAL (trueop1
), xmode
));
2991 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2994 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2995 if (GET_CODE (op0
) == IOR
2996 && CONST_INT_P (trueop1
)
2997 && CONST_INT_P (XEXP (op0
, 1)))
2999 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3000 return simplify_gen_binary (IOR
, mode
,
3001 simplify_gen_binary (AND
, mode
,
3002 XEXP (op0
, 0), op1
),
3003 gen_int_mode (tmp
, mode
));
3006 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3007 insn (and may simplify more). */
3008 if (GET_CODE (op0
) == XOR
3009 && rtx_equal_p (XEXP (op0
, 0), op1
)
3010 && ! side_effects_p (op1
))
3011 return simplify_gen_binary (AND
, mode
,
3012 simplify_gen_unary (NOT
, mode
,
3013 XEXP (op0
, 1), mode
),
3016 if (GET_CODE (op0
) == XOR
3017 && rtx_equal_p (XEXP (op0
, 1), op1
)
3018 && ! side_effects_p (op1
))
3019 return simplify_gen_binary (AND
, mode
,
3020 simplify_gen_unary (NOT
, mode
,
3021 XEXP (op0
, 0), mode
),
3024 /* Similarly for (~(A ^ B)) & A. */
3025 if (GET_CODE (op0
) == NOT
3026 && GET_CODE (XEXP (op0
, 0)) == XOR
3027 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3028 && ! side_effects_p (op1
))
3029 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3031 if (GET_CODE (op0
) == NOT
3032 && GET_CODE (XEXP (op0
, 0)) == XOR
3033 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3034 && ! side_effects_p (op1
))
3035 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3037 /* Convert (A | B) & A to A. */
3038 if (GET_CODE (op0
) == IOR
3039 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3040 || rtx_equal_p (XEXP (op0
, 1), op1
))
3041 && ! side_effects_p (XEXP (op0
, 0))
3042 && ! side_effects_p (XEXP (op0
, 1)))
3045 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3046 ((A & N) + B) & M -> (A + B) & M
3047 Similarly if (N & M) == 0,
3048 ((A | N) + B) & M -> (A + B) & M
3049 and for - instead of + and/or ^ instead of |.
3050 Also, if (N & M) == 0, then
3051 (A +- N) & M -> A & M. */
3052 if (CONST_INT_P (trueop1
)
3053 && HWI_COMPUTABLE_MODE_P (mode
)
3054 && ~UINTVAL (trueop1
)
3055 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3056 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3061 pmop
[0] = XEXP (op0
, 0);
3062 pmop
[1] = XEXP (op0
, 1);
3064 if (CONST_INT_P (pmop
[1])
3065 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3066 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3068 for (which
= 0; which
< 2; which
++)
3071 switch (GET_CODE (tem
))
3074 if (CONST_INT_P (XEXP (tem
, 1))
3075 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3076 == UINTVAL (trueop1
))
3077 pmop
[which
] = XEXP (tem
, 0);
3081 if (CONST_INT_P (XEXP (tem
, 1))
3082 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3083 pmop
[which
] = XEXP (tem
, 0);
3090 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3092 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3094 return simplify_gen_binary (code
, mode
, tem
, op1
);
3098 /* (and X (ior (not X) Y) -> (and X Y) */
3099 if (GET_CODE (op1
) == IOR
3100 && GET_CODE (XEXP (op1
, 0)) == NOT
3101 && op0
== XEXP (XEXP (op1
, 0), 0))
3102 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3104 /* (and (ior (not X) Y) X) -> (and X Y) */
3105 if (GET_CODE (op0
) == IOR
3106 && GET_CODE (XEXP (op0
, 0)) == NOT
3107 && op1
== XEXP (XEXP (op0
, 0), 0))
3108 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3110 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3116 /* 0/x is 0 (or x&0 if x has side-effects). */
3117 if (trueop0
== CONST0_RTX (mode
))
3119 if (side_effects_p (op1
))
3120 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3124 if (trueop1
== CONST1_RTX (mode
))
3126 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3130 /* Convert divide by power of two into shift. */
3131 if (CONST_INT_P (trueop1
)
3132 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3133 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3137 /* Handle floating point and integers separately. */
3138 if (SCALAR_FLOAT_MODE_P (mode
))
3140 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3141 safe for modes with NaNs, since 0.0 / 0.0 will then be
3142 NaN rather than 0.0. Nor is it safe for modes with signed
3143 zeros, since dividing 0 by a negative number gives -0.0 */
3144 if (trueop0
== CONST0_RTX (mode
)
3145 && !HONOR_NANS (mode
)
3146 && !HONOR_SIGNED_ZEROS (mode
)
3147 && ! side_effects_p (op1
))
3150 if (trueop1
== CONST1_RTX (mode
)
3151 && !HONOR_SNANS (mode
))
3154 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3155 && trueop1
!= CONST0_RTX (mode
))
3158 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3161 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3162 && !HONOR_SNANS (mode
))
3163 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3165 /* Change FP division by a constant into multiplication.
3166 Only do this with -freciprocal-math. */
3167 if (flag_reciprocal_math
3168 && !REAL_VALUES_EQUAL (d
, dconst0
))
3170 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3171 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3172 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3176 else if (SCALAR_INT_MODE_P (mode
))
3178 /* 0/x is 0 (or x&0 if x has side-effects). */
3179 if (trueop0
== CONST0_RTX (mode
)
3180 && !cfun
->can_throw_non_call_exceptions
)
3182 if (side_effects_p (op1
))
3183 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3187 if (trueop1
== CONST1_RTX (mode
))
3189 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3194 if (trueop1
== constm1_rtx
)
3196 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3198 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3204 /* 0%x is 0 (or x&0 if x has side-effects). */
3205 if (trueop0
== CONST0_RTX (mode
))
3207 if (side_effects_p (op1
))
3208 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3211 /* x%1 is 0 (of x&0 if x has side-effects). */
3212 if (trueop1
== CONST1_RTX (mode
))
3214 if (side_effects_p (op0
))
3215 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3216 return CONST0_RTX (mode
);
3218 /* Implement modulus by power of two as AND. */
3219 if (CONST_INT_P (trueop1
)
3220 && exact_log2 (UINTVAL (trueop1
)) > 0)
3221 return simplify_gen_binary (AND
, mode
, op0
,
3222 GEN_INT (INTVAL (op1
) - 1));
3226 /* 0%x is 0 (or x&0 if x has side-effects). */
3227 if (trueop0
== CONST0_RTX (mode
))
3229 if (side_effects_p (op1
))
3230 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3233 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3234 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3236 if (side_effects_p (op0
))
3237 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3238 return CONST0_RTX (mode
);
3245 if (trueop1
== CONST0_RTX (mode
))
3247 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3249 /* Rotating ~0 always results in ~0. */
3250 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3251 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3252 && ! side_effects_p (op1
))
3255 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3257 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
3258 if (val
!= INTVAL (op1
))
3259 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3266 if (trueop1
== CONST0_RTX (mode
))
3268 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3270 goto canonicalize_shift
;
3273 if (trueop1
== CONST0_RTX (mode
))
3275 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3277 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3278 if (GET_CODE (op0
) == CLZ
3279 && CONST_INT_P (trueop1
)
3280 && STORE_FLAG_VALUE
== 1
3281 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3283 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3284 unsigned HOST_WIDE_INT zero_val
= 0;
3286 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3287 && zero_val
== GET_MODE_PRECISION (imode
)
3288 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3289 return simplify_gen_relational (EQ
, mode
, imode
,
3290 XEXP (op0
, 0), const0_rtx
);
3292 goto canonicalize_shift
;
3295 if (width
<= HOST_BITS_PER_WIDE_INT
3296 && mode_signbit_p (mode
, trueop1
)
3297 && ! side_effects_p (op0
))
3299 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3301 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3307 if (width
<= HOST_BITS_PER_WIDE_INT
3308 && CONST_INT_P (trueop1
)
3309 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3310 && ! side_effects_p (op0
))
3312 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3314 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3320 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3322 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3324 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3330 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3332 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3334 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3347 /* ??? There are simplifications that can be done. */
3351 if (!VECTOR_MODE_P (mode
))
3353 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3354 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3355 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3356 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3357 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3359 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3360 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3363 /* Extract a scalar element from a nested VEC_SELECT expression
3364 (with optional nested VEC_CONCAT expression). Some targets
3365 (i386) extract scalar element from a vector using chain of
3366 nested VEC_SELECT expressions. When input operand is a memory
3367 operand, this operation can be simplified to a simple scalar
3368 load from an offseted memory address. */
3369 if (GET_CODE (trueop0
) == VEC_SELECT
)
3371 rtx op0
= XEXP (trueop0
, 0);
3372 rtx op1
= XEXP (trueop0
, 1);
3374 enum machine_mode opmode
= GET_MODE (op0
);
3375 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3376 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3378 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3384 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3385 gcc_assert (i
< n_elts
);
3387 /* Select element, pointed by nested selector. */
3388 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3390 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3391 if (GET_CODE (op0
) == VEC_CONCAT
)
3393 rtx op00
= XEXP (op0
, 0);
3394 rtx op01
= XEXP (op0
, 1);
3396 enum machine_mode mode00
, mode01
;
3397 int n_elts00
, n_elts01
;
3399 mode00
= GET_MODE (op00
);
3400 mode01
= GET_MODE (op01
);
3402 /* Find out number of elements of each operand. */
3403 if (VECTOR_MODE_P (mode00
))
3405 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3406 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3411 if (VECTOR_MODE_P (mode01
))
3413 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3414 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3419 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3421 /* Select correct operand of VEC_CONCAT
3422 and adjust selector. */
3423 if (elem
< n_elts01
)
3434 vec
= rtvec_alloc (1);
3435 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3437 tmp
= gen_rtx_fmt_ee (code
, mode
,
3438 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3441 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3442 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3443 return XEXP (trueop0
, 0);
3447 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3448 gcc_assert (GET_MODE_INNER (mode
)
3449 == GET_MODE_INNER (GET_MODE (trueop0
)));
3450 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3452 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3454 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3455 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3456 rtvec v
= rtvec_alloc (n_elts
);
3459 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3460 for (i
= 0; i
< n_elts
; i
++)
3462 rtx x
= XVECEXP (trueop1
, 0, i
);
3464 gcc_assert (CONST_INT_P (x
));
3465 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3469 return gen_rtx_CONST_VECTOR (mode
, v
);
3472 /* Recognize the identity. */
3473 if (GET_MODE (trueop0
) == mode
)
3475 bool maybe_ident
= true;
3476 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3478 rtx j
= XVECEXP (trueop1
, 0, i
);
3479 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3481 maybe_ident
= false;
3489 /* If we build {a,b} then permute it, build the result directly. */
3490 if (XVECLEN (trueop1
, 0) == 2
3491 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3492 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3493 && GET_CODE (trueop0
) == VEC_CONCAT
3494 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3495 && GET_MODE (XEXP (trueop0
, 0)) == mode
3496 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3497 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3499 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3500 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3503 gcc_assert (i0
< 4 && i1
< 4);
3504 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3505 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3507 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3510 if (XVECLEN (trueop1
, 0) == 2
3511 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3512 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3513 && GET_CODE (trueop0
) == VEC_CONCAT
3514 && GET_MODE (trueop0
) == mode
)
3516 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3517 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3520 gcc_assert (i0
< 2 && i1
< 2);
3521 subop0
= XEXP (trueop0
, i0
);
3522 subop1
= XEXP (trueop0
, i1
);
3524 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3528 if (XVECLEN (trueop1
, 0) == 1
3529 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3530 && GET_CODE (trueop0
) == VEC_CONCAT
)
3533 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3535 /* Try to find the element in the VEC_CONCAT. */
3536 while (GET_MODE (vec
) != mode
3537 && GET_CODE (vec
) == VEC_CONCAT
)
3539 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3540 if (offset
< vec_size
)
3541 vec
= XEXP (vec
, 0);
3545 vec
= XEXP (vec
, 1);
3547 vec
= avoid_constant_pool_reference (vec
);
3550 if (GET_MODE (vec
) == mode
)
3557 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3558 ? GET_MODE (trueop0
)
3559 : GET_MODE_INNER (mode
));
3560 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3561 ? GET_MODE (trueop1
)
3562 : GET_MODE_INNER (mode
));
3564 gcc_assert (VECTOR_MODE_P (mode
));
3565 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3566 == GET_MODE_SIZE (mode
));
3568 if (VECTOR_MODE_P (op0_mode
))
3569 gcc_assert (GET_MODE_INNER (mode
)
3570 == GET_MODE_INNER (op0_mode
));
3572 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3574 if (VECTOR_MODE_P (op1_mode
))
3575 gcc_assert (GET_MODE_INNER (mode
)
3576 == GET_MODE_INNER (op1_mode
));
3578 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3580 if ((GET_CODE (trueop0
) == CONST_VECTOR
3581 || CONST_SCALAR_INT_P (trueop0
)
3582 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3583 && (GET_CODE (trueop1
) == CONST_VECTOR
3584 || CONST_SCALAR_INT_P (trueop1
)
3585 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3587 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3588 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3589 rtvec v
= rtvec_alloc (n_elts
);
3591 unsigned in_n_elts
= 1;
3593 if (VECTOR_MODE_P (op0_mode
))
3594 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3595 for (i
= 0; i
< n_elts
; i
++)
3599 if (!VECTOR_MODE_P (op0_mode
))
3600 RTVEC_ELT (v
, i
) = trueop0
;
3602 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3606 if (!VECTOR_MODE_P (op1_mode
))
3607 RTVEC_ELT (v
, i
) = trueop1
;
3609 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3614 return gen_rtx_CONST_VECTOR (mode
, v
);
3617 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3618 if (GET_CODE (trueop0
) == VEC_SELECT
3619 && GET_CODE (trueop1
) == VEC_SELECT
3620 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0)))
3622 rtx par0
= XEXP (trueop0
, 1);
3623 rtx par1
= XEXP (trueop1
, 1);
3624 int len0
= XVECLEN (par0
, 0);
3625 int len1
= XVECLEN (par1
, 0);
3626 rtvec vec
= rtvec_alloc (len0
+ len1
);
3627 for (int i
= 0; i
< len0
; i
++)
3628 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3629 for (int i
= 0; i
< len1
; i
++)
3630 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3631 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3632 gen_rtx_PARALLEL (VOIDmode
, vec
));
3645 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3648 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3650 unsigned int width
= GET_MODE_PRECISION (mode
);
3652 if (VECTOR_MODE_P (mode
)
3653 && code
!= VEC_CONCAT
3654 && GET_CODE (op0
) == CONST_VECTOR
3655 && GET_CODE (op1
) == CONST_VECTOR
)
3657 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3658 enum machine_mode op0mode
= GET_MODE (op0
);
3659 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3660 enum machine_mode op1mode
= GET_MODE (op1
);
3661 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3662 rtvec v
= rtvec_alloc (n_elts
);
3665 gcc_assert (op0_n_elts
== n_elts
);
3666 gcc_assert (op1_n_elts
== n_elts
);
3667 for (i
= 0; i
< n_elts
; i
++)
3669 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3670 CONST_VECTOR_ELT (op0
, i
),
3671 CONST_VECTOR_ELT (op1
, i
));
3674 RTVEC_ELT (v
, i
) = x
;
3677 return gen_rtx_CONST_VECTOR (mode
, v
);
3680 if (VECTOR_MODE_P (mode
)
3681 && code
== VEC_CONCAT
3682 && (CONST_SCALAR_INT_P (op0
)
3683 || GET_CODE (op0
) == CONST_FIXED
3684 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3685 && (CONST_SCALAR_INT_P (op1
)
3686 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3687 || GET_CODE (op1
) == CONST_FIXED
))
3689 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3690 rtvec v
= rtvec_alloc (n_elts
);
3692 gcc_assert (n_elts
>= 2);
3695 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3696 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3698 RTVEC_ELT (v
, 0) = op0
;
3699 RTVEC_ELT (v
, 1) = op1
;
3703 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3704 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3707 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3708 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3709 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3711 for (i
= 0; i
< op0_n_elts
; ++i
)
3712 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3713 for (i
= 0; i
< op1_n_elts
; ++i
)
3714 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3717 return gen_rtx_CONST_VECTOR (mode
, v
);
3720 if (SCALAR_FLOAT_MODE_P (mode
)
3721 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3722 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3723 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3734 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3736 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3738 for (i
= 0; i
< 4; i
++)
3755 real_from_target (&r
, tmp0
, mode
);
3756 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3760 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3763 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3764 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3765 real_convert (&f0
, mode
, &f0
);
3766 real_convert (&f1
, mode
, &f1
);
3768 if (HONOR_SNANS (mode
)
3769 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3773 && REAL_VALUES_EQUAL (f1
, dconst0
)
3774 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3777 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3778 && flag_trapping_math
3779 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3781 int s0
= REAL_VALUE_NEGATIVE (f0
);
3782 int s1
= REAL_VALUE_NEGATIVE (f1
);
3787 /* Inf + -Inf = NaN plus exception. */
3792 /* Inf - Inf = NaN plus exception. */
3797 /* Inf / Inf = NaN plus exception. */
3804 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3805 && flag_trapping_math
3806 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3807 || (REAL_VALUE_ISINF (f1
)
3808 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3809 /* Inf * 0 = NaN plus exception. */
3812 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3814 real_convert (&result
, mode
, &value
);
3816 /* Don't constant fold this floating point operation if
3817 the result has overflowed and flag_trapping_math. */
3819 if (flag_trapping_math
3820 && MODE_HAS_INFINITIES (mode
)
3821 && REAL_VALUE_ISINF (result
)
3822 && !REAL_VALUE_ISINF (f0
)
3823 && !REAL_VALUE_ISINF (f1
))
3824 /* Overflow plus exception. */
3827 /* Don't constant fold this floating point operation if the
3828 result may dependent upon the run-time rounding mode and
3829 flag_rounding_math is set, or if GCC's software emulation
3830 is unable to accurately represent the result. */
3832 if ((flag_rounding_math
3833 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3834 && (inexact
|| !real_identical (&result
, &value
)))
3837 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3841 /* We can fold some multi-word operations. */
3842 if (GET_MODE_CLASS (mode
) == MODE_INT
3843 && width
== HOST_BITS_PER_DOUBLE_INT
3844 && (CONST_DOUBLE_AS_INT_P (op0
) || CONST_INT_P (op0
))
3845 && (CONST_DOUBLE_AS_INT_P (op1
) || CONST_INT_P (op1
)))
3847 double_int o0
, o1
, res
, tmp
;
3850 o0
= rtx_to_double_int (op0
);
3851 o1
= rtx_to_double_int (op1
);
3856 /* A - B == A + (-B). */
3859 /* Fall through.... */
3870 res
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3877 tmp
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3884 res
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
3891 tmp
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
3925 case LSHIFTRT
: case ASHIFTRT
:
3927 case ROTATE
: case ROTATERT
:
3929 unsigned HOST_WIDE_INT cnt
;
3931 if (SHIFT_COUNT_TRUNCATED
)
3934 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
3937 if (!o1
.fits_uhwi ()
3938 || o1
.to_uhwi () >= GET_MODE_PRECISION (mode
))
3941 cnt
= o1
.to_uhwi ();
3942 unsigned short prec
= GET_MODE_PRECISION (mode
);
3944 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3945 res
= o0
.rshift (cnt
, prec
, code
== ASHIFTRT
);
3946 else if (code
== ASHIFT
)
3947 res
= o0
.alshift (cnt
, prec
);
3948 else if (code
== ROTATE
)
3949 res
= o0
.lrotate (cnt
, prec
);
3950 else /* code == ROTATERT */
3951 res
= o0
.rrotate (cnt
, prec
);
3959 return immed_double_int_const (res
, mode
);
3962 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
3963 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3965 /* Get the integer argument values in two forms:
3966 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3968 arg0
= INTVAL (op0
);
3969 arg1
= INTVAL (op1
);
3971 if (width
< HOST_BITS_PER_WIDE_INT
)
3973 arg0
&= GET_MODE_MASK (mode
);
3974 arg1
&= GET_MODE_MASK (mode
);
3977 if (val_signbit_known_set_p (mode
, arg0s
))
3978 arg0s
|= ~GET_MODE_MASK (mode
);
3981 if (val_signbit_known_set_p (mode
, arg1s
))
3982 arg1s
|= ~GET_MODE_MASK (mode
);
3990 /* Compute the value of the arithmetic. */
3995 val
= arg0s
+ arg1s
;
3999 val
= arg0s
- arg1s
;
4003 val
= arg0s
* arg1s
;
4008 || ((unsigned HOST_WIDE_INT
) arg0s
4009 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4012 val
= arg0s
/ arg1s
;
4017 || ((unsigned HOST_WIDE_INT
) arg0s
4018 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4021 val
= arg0s
% arg1s
;
4026 || ((unsigned HOST_WIDE_INT
) arg0s
4027 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4030 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
4035 || ((unsigned HOST_WIDE_INT
) arg0s
4036 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4039 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
4057 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4058 the value is in range. We can't return any old value for
4059 out-of-range arguments because either the middle-end (via
4060 shift_truncation_mask) or the back-end might be relying on
4061 target-specific knowledge. Nor can we rely on
4062 shift_truncation_mask, since the shift might not be part of an
4063 ashlM3, lshrM3 or ashrM3 instruction. */
4064 if (SHIFT_COUNT_TRUNCATED
)
4065 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
4066 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
4069 val
= (code
== ASHIFT
4070 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
4071 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
4073 /* Sign-extend the result for arithmetic right shifts. */
4074 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
4075 val
|= ((unsigned HOST_WIDE_INT
) (-1)) << (width
- arg1
);
4083 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
4084 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
4092 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
4093 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
4097 /* Do nothing here. */
4101 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
4105 val
= ((unsigned HOST_WIDE_INT
) arg0
4106 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4110 val
= arg0s
> arg1s
? arg0s
: arg1s
;
4114 val
= ((unsigned HOST_WIDE_INT
) arg0
4115 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4128 /* ??? There are simplifications that can be done. */
4135 return gen_int_mode (val
, mode
);
4143 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4146 Rather than test for specific case, we do this by a brute-force method
4147 and do all possible simplifications until no more changes occur. Then
4148 we rebuild the operation. */
4150 struct simplify_plus_minus_op_data
4157 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4161 result
= (commutative_operand_precedence (y
)
4162 - commutative_operand_precedence (x
));
4166 /* Group together equal REGs to do more simplification. */
4167 if (REG_P (x
) && REG_P (y
))
4168 return REGNO (x
) > REGNO (y
);
4174 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
4177 struct simplify_plus_minus_op_data ops
[8];
4179 int n_ops
= 2, input_ops
= 2;
4180 int changed
, n_constants
= 0, canonicalized
= 0;
4183 memset (ops
, 0, sizeof ops
);
4185 /* Set up the two operands and then expand them until nothing has been
4186 changed. If we run out of room in our array, give up; this should
4187 almost never happen. */
4192 ops
[1].neg
= (code
== MINUS
);
4198 for (i
= 0; i
< n_ops
; i
++)
4200 rtx this_op
= ops
[i
].op
;
4201 int this_neg
= ops
[i
].neg
;
4202 enum rtx_code this_code
= GET_CODE (this_op
);
4211 ops
[n_ops
].op
= XEXP (this_op
, 1);
4212 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4215 ops
[i
].op
= XEXP (this_op
, 0);
4218 canonicalized
|= this_neg
;
4222 ops
[i
].op
= XEXP (this_op
, 0);
4223 ops
[i
].neg
= ! this_neg
;
4230 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4231 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4232 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4234 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4235 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4236 ops
[n_ops
].neg
= this_neg
;
4244 /* ~a -> (-a - 1) */
4247 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4248 ops
[n_ops
++].neg
= this_neg
;
4249 ops
[i
].op
= XEXP (this_op
, 0);
4250 ops
[i
].neg
= !this_neg
;
4260 ops
[i
].op
= neg_const_int (mode
, this_op
);
4274 if (n_constants
> 1)
4277 gcc_assert (n_ops
>= 2);
4279 /* If we only have two operands, we can avoid the loops. */
4282 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4285 /* Get the two operands. Be careful with the order, especially for
4286 the cases where code == MINUS. */
4287 if (ops
[0].neg
&& ops
[1].neg
)
4289 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4292 else if (ops
[0].neg
)
4303 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4306 /* Now simplify each pair of operands until nothing changes. */
4309 /* Insertion sort is good enough for an eight-element array. */
4310 for (i
= 1; i
< n_ops
; i
++)
4312 struct simplify_plus_minus_op_data save
;
4314 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
4320 ops
[j
+ 1] = ops
[j
];
4321 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
4326 for (i
= n_ops
- 1; i
> 0; i
--)
4327 for (j
= i
- 1; j
>= 0; j
--)
4329 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4330 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4332 if (lhs
!= 0 && rhs
!= 0)
4334 enum rtx_code ncode
= PLUS
;
4340 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4342 else if (swap_commutative_operands_p (lhs
, rhs
))
4343 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4345 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4346 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4348 rtx tem_lhs
, tem_rhs
;
4350 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4351 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4352 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
4354 if (tem
&& !CONSTANT_P (tem
))
4355 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4358 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4360 /* Reject "simplifications" that just wrap the two
4361 arguments in a CONST. Failure to do so can result
4362 in infinite recursion with simplify_binary_operation
4363 when it calls us to simplify CONST operations. */
4365 && ! (GET_CODE (tem
) == CONST
4366 && GET_CODE (XEXP (tem
, 0)) == ncode
4367 && XEXP (XEXP (tem
, 0), 0) == lhs
4368 && XEXP (XEXP (tem
, 0), 1) == rhs
))
4371 if (GET_CODE (tem
) == NEG
)
4372 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4373 if (CONST_INT_P (tem
) && lneg
)
4374 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4378 ops
[j
].op
= NULL_RTX
;
4385 /* If nothing changed, fail. */
4389 /* Pack all the operands to the lower-numbered entries. */
4390 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4400 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4402 && CONST_INT_P (ops
[1].op
)
4403 && CONSTANT_P (ops
[0].op
)
4405 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4407 /* We suppressed creation of trivial CONST expressions in the
4408 combination loop to avoid recursion. Create one manually now.
4409 The combination loop should have ensured that there is exactly
4410 one CONST_INT, and the sort will have ensured that it is last
4411 in the array and that any other constant will be next-to-last. */
4414 && CONST_INT_P (ops
[n_ops
- 1].op
)
4415 && CONSTANT_P (ops
[n_ops
- 2].op
))
4417 rtx value
= ops
[n_ops
- 1].op
;
4418 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4419 value
= neg_const_int (mode
, value
);
4420 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4425 /* Put a non-negated operand first, if possible. */
4427 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4430 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4439 /* Now make the result by performing the requested operations. */
4441 for (i
= 1; i
< n_ops
; i
++)
4442 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4443 mode
, result
, ops
[i
].op
);
4448 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4450 plus_minus_operand_p (const_rtx x
)
4452 return GET_CODE (x
) == PLUS
4453 || GET_CODE (x
) == MINUS
4454 || (GET_CODE (x
) == CONST
4455 && GET_CODE (XEXP (x
, 0)) == PLUS
4456 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4457 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
4460 /* Like simplify_binary_operation except used for relational operators.
4461 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4462 not also be VOIDmode.
4464 CMP_MODE specifies in which mode the comparison is done in, so it is
4465 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4466 the operands or, if both are VOIDmode, the operands are compared in
4467 "infinite precision". */
4469 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
4470 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4472 rtx tem
, trueop0
, trueop1
;
4474 if (cmp_mode
== VOIDmode
)
4475 cmp_mode
= GET_MODE (op0
);
4476 if (cmp_mode
== VOIDmode
)
4477 cmp_mode
= GET_MODE (op1
);
4479 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4482 if (SCALAR_FLOAT_MODE_P (mode
))
4484 if (tem
== const0_rtx
)
4485 return CONST0_RTX (mode
);
4486 #ifdef FLOAT_STORE_FLAG_VALUE
4488 REAL_VALUE_TYPE val
;
4489 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4490 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
4496 if (VECTOR_MODE_P (mode
))
4498 if (tem
== const0_rtx
)
4499 return CONST0_RTX (mode
);
4500 #ifdef VECTOR_STORE_FLAG_VALUE
4505 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4506 if (val
== NULL_RTX
)
4508 if (val
== const1_rtx
)
4509 return CONST1_RTX (mode
);
4511 units
= GET_MODE_NUNITS (mode
);
4512 v
= rtvec_alloc (units
);
4513 for (i
= 0; i
< units
; i
++)
4514 RTVEC_ELT (v
, i
) = val
;
4515 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4525 /* For the following tests, ensure const0_rtx is op1. */
4526 if (swap_commutative_operands_p (op0
, op1
)
4527 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4528 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
4530 /* If op0 is a compare, extract the comparison arguments from it. */
4531 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4532 return simplify_gen_relational (code
, mode
, VOIDmode
,
4533 XEXP (op0
, 0), XEXP (op0
, 1));
4535 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4539 trueop0
= avoid_constant_pool_reference (op0
);
4540 trueop1
= avoid_constant_pool_reference (op1
);
4541 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
4545 /* This part of simplify_relational_operation is only used when CMP_MODE
4546 is not in class MODE_CC (i.e. it is a real comparison).
4548 MODE is the mode of the result, while CMP_MODE specifies in which
4549 mode the comparison is done in, so it is the mode of the operands. */
4552 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
4553 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4555 enum rtx_code op0code
= GET_CODE (op0
);
4557 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4559 /* If op0 is a comparison, extract the comparison arguments
4563 if (GET_MODE (op0
) == mode
)
4564 return simplify_rtx (op0
);
4566 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4567 XEXP (op0
, 0), XEXP (op0
, 1));
4569 else if (code
== EQ
)
4571 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4572 if (new_code
!= UNKNOWN
)
4573 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4574 XEXP (op0
, 0), XEXP (op0
, 1));
4578 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4579 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4580 if ((code
== LTU
|| code
== GEU
)
4581 && GET_CODE (op0
) == PLUS
4582 && CONST_INT_P (XEXP (op0
, 1))
4583 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4584 || rtx_equal_p (op1
, XEXP (op0
, 1)))
4585 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4586 && XEXP (op0
, 1) != const0_rtx
)
4589 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4590 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4591 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4594 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4595 if ((code
== LTU
|| code
== GEU
)
4596 && GET_CODE (op0
) == PLUS
4597 && rtx_equal_p (op1
, XEXP (op0
, 1))
4598 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4599 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4600 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4601 copy_rtx (XEXP (op0
, 0)));
4603 if (op1
== const0_rtx
)
4605 /* Canonicalize (GTU x 0) as (NE x 0). */
4607 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4608 /* Canonicalize (LEU x 0) as (EQ x 0). */
4610 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4612 else if (op1
== const1_rtx
)
4617 /* Canonicalize (GE x 1) as (GT x 0). */
4618 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4621 /* Canonicalize (GEU x 1) as (NE x 0). */
4622 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4625 /* Canonicalize (LT x 1) as (LE x 0). */
4626 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4629 /* Canonicalize (LTU x 1) as (EQ x 0). */
4630 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4636 else if (op1
== constm1_rtx
)
4638 /* Canonicalize (LE x -1) as (LT x 0). */
4640 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4641 /* Canonicalize (GT x -1) as (GE x 0). */
4643 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4646 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4647 if ((code
== EQ
|| code
== NE
)
4648 && (op0code
== PLUS
|| op0code
== MINUS
)
4650 && CONSTANT_P (XEXP (op0
, 1))
4651 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4653 rtx x
= XEXP (op0
, 0);
4654 rtx c
= XEXP (op0
, 1);
4655 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4656 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4658 /* Detect an infinite recursive condition, where we oscillate at this
4659 simplification case between:
4660 A + B == C <---> C - B == A,
4661 where A, B, and C are all constants with non-simplifiable expressions,
4662 usually SYMBOL_REFs. */
4663 if (GET_CODE (tem
) == invcode
4665 && rtx_equal_p (c
, XEXP (tem
, 1)))
4668 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4671 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4672 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4674 && op1
== const0_rtx
4675 && GET_MODE_CLASS (mode
) == MODE_INT
4676 && cmp_mode
!= VOIDmode
4677 /* ??? Work-around BImode bugs in the ia64 backend. */
4679 && cmp_mode
!= BImode
4680 && nonzero_bits (op0
, cmp_mode
) == 1
4681 && STORE_FLAG_VALUE
== 1)
4682 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4683 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4684 : lowpart_subreg (mode
, op0
, cmp_mode
);
4686 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4687 if ((code
== EQ
|| code
== NE
)
4688 && op1
== const0_rtx
4690 return simplify_gen_relational (code
, mode
, cmp_mode
,
4691 XEXP (op0
, 0), XEXP (op0
, 1));
4693 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4694 if ((code
== EQ
|| code
== NE
)
4696 && rtx_equal_p (XEXP (op0
, 0), op1
)
4697 && !side_effects_p (XEXP (op0
, 0)))
4698 return simplify_gen_relational (code
, mode
, cmp_mode
,
4699 XEXP (op0
, 1), const0_rtx
);
4701 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4702 if ((code
== EQ
|| code
== NE
)
4704 && rtx_equal_p (XEXP (op0
, 1), op1
)
4705 && !side_effects_p (XEXP (op0
, 1)))
4706 return simplify_gen_relational (code
, mode
, cmp_mode
,
4707 XEXP (op0
, 0), const0_rtx
);
4709 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4710 if ((code
== EQ
|| code
== NE
)
4712 && CONST_SCALAR_INT_P (op1
)
4713 && CONST_SCALAR_INT_P (XEXP (op0
, 1)))
4714 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4715 simplify_gen_binary (XOR
, cmp_mode
,
4716 XEXP (op0
, 1), op1
));
4718 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4724 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4725 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4726 XEXP (op0
, 0), const0_rtx
);
4731 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4732 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4733 XEXP (op0
, 0), const0_rtx
);
4752 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4753 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4754 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4755 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4756 For floating-point comparisons, assume that the operands were ordered. */
4759 comparison_result (enum rtx_code code
, int known_results
)
4765 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4768 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4772 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4775 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4779 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4782 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4785 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4787 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4790 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4792 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4795 return const_true_rtx
;
4803 /* Check if the given comparison (done in the given MODE) is actually a
4804 tautology or a contradiction.
4805 If no simplification is possible, this function returns zero.
4806 Otherwise, it returns either const_true_rtx or const0_rtx. */
4809 simplify_const_relational_operation (enum rtx_code code
,
4810 enum machine_mode mode
,
4817 gcc_assert (mode
!= VOIDmode
4818 || (GET_MODE (op0
) == VOIDmode
4819 && GET_MODE (op1
) == VOIDmode
));
4821 /* If op0 is a compare, extract the comparison arguments from it. */
4822 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4824 op1
= XEXP (op0
, 1);
4825 op0
= XEXP (op0
, 0);
4827 if (GET_MODE (op0
) != VOIDmode
)
4828 mode
= GET_MODE (op0
);
4829 else if (GET_MODE (op1
) != VOIDmode
)
4830 mode
= GET_MODE (op1
);
4835 /* We can't simplify MODE_CC values since we don't know what the
4836 actual comparison is. */
4837 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4840 /* Make sure the constant is second. */
4841 if (swap_commutative_operands_p (op0
, op1
))
4843 tem
= op0
, op0
= op1
, op1
= tem
;
4844 code
= swap_condition (code
);
4847 trueop0
= avoid_constant_pool_reference (op0
);
4848 trueop1
= avoid_constant_pool_reference (op1
);
4850 /* For integer comparisons of A and B maybe we can simplify A - B and can
4851 then simplify a comparison of that with zero. If A and B are both either
4852 a register or a CONST_INT, this can't help; testing for these cases will
4853 prevent infinite recursion here and speed things up.
4855 We can only do this for EQ and NE comparisons as otherwise we may
4856 lose or introduce overflow which we cannot disregard as undefined as
4857 we do not know the signedness of the operation on either the left or
4858 the right hand side of the comparison. */
4860 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4861 && (code
== EQ
|| code
== NE
)
4862 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
4863 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
4864 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4865 /* We cannot do this if tem is a nonzero address. */
4866 && ! nonzero_address_p (tem
))
4867 return simplify_const_relational_operation (signed_condition (code
),
4868 mode
, tem
, const0_rtx
);
4870 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4871 return const_true_rtx
;
4873 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4876 /* For modes without NaNs, if the two operands are equal, we know the
4877 result except if they have side-effects. Even with NaNs we know
4878 the result of unordered comparisons and, if signaling NaNs are
4879 irrelevant, also the result of LT/GT/LTGT. */
4880 if ((! HONOR_NANS (GET_MODE (trueop0
))
4881 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4882 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4883 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4884 && rtx_equal_p (trueop0
, trueop1
)
4885 && ! side_effects_p (trueop0
))
4886 return comparison_result (code
, CMP_EQ
);
4888 /* If the operands are floating-point constants, see if we can fold
4890 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
4891 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
4892 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4894 REAL_VALUE_TYPE d0
, d1
;
4896 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4897 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4899 /* Comparisons are unordered iff at least one of the values is NaN. */
4900 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4910 return const_true_rtx
;
4923 return comparison_result (code
,
4924 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4925 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4928 /* Otherwise, see if the operands are both integers. */
4929 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4930 && (CONST_DOUBLE_AS_INT_P (trueop0
) || CONST_INT_P (trueop0
))
4931 && (CONST_DOUBLE_AS_INT_P (trueop1
) || CONST_INT_P (trueop1
)))
4933 int width
= GET_MODE_PRECISION (mode
);
4934 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4935 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4937 /* Get the two words comprising each integer constant. */
4938 if (CONST_DOUBLE_AS_INT_P (trueop0
))
4940 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4941 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4945 l0u
= l0s
= INTVAL (trueop0
);
4946 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4949 if (CONST_DOUBLE_AS_INT_P (trueop1
))
4951 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4952 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4956 l1u
= l1s
= INTVAL (trueop1
);
4957 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4960 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4961 we have to sign or zero-extend the values. */
4962 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4964 l0u
&= GET_MODE_MASK (mode
);
4965 l1u
&= GET_MODE_MASK (mode
);
4967 if (val_signbit_known_set_p (mode
, l0s
))
4968 l0s
|= ~GET_MODE_MASK (mode
);
4970 if (val_signbit_known_set_p (mode
, l1s
))
4971 l1s
|= ~GET_MODE_MASK (mode
);
4973 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4974 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4976 if (h0u
== h1u
&& l0u
== l1u
)
4977 return comparison_result (code
, CMP_EQ
);
4981 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4982 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4983 return comparison_result (code
, cr
);
4987 /* Optimize comparisons with upper and lower bounds. */
4988 if (HWI_COMPUTABLE_MODE_P (mode
)
4989 && CONST_INT_P (trueop1
))
4992 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4993 HOST_WIDE_INT val
= INTVAL (trueop1
);
4994 HOST_WIDE_INT mmin
, mmax
;
5004 /* Get a reduced range if the sign bit is zero. */
5005 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
5012 rtx mmin_rtx
, mmax_rtx
;
5013 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
5015 mmin
= INTVAL (mmin_rtx
);
5016 mmax
= INTVAL (mmax_rtx
);
5019 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
5021 mmin
>>= (sign_copies
- 1);
5022 mmax
>>= (sign_copies
- 1);
5028 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5030 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5031 return const_true_rtx
;
5032 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5037 return const_true_rtx
;
5042 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5044 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5045 return const_true_rtx
;
5046 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5051 return const_true_rtx
;
5057 /* x == y is always false for y out of range. */
5058 if (val
< mmin
|| val
> mmax
)
5062 /* x > y is always false for y >= mmax, always true for y < mmin. */
5064 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5066 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5067 return const_true_rtx
;
5073 return const_true_rtx
;
5076 /* x < y is always false for y <= mmin, always true for y > mmax. */
5078 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5080 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5081 return const_true_rtx
;
5087 return const_true_rtx
;
5091 /* x != y is always true for y out of range. */
5092 if (val
< mmin
|| val
> mmax
)
5093 return const_true_rtx
;
5101 /* Optimize integer comparisons with zero. */
5102 if (trueop1
== const0_rtx
)
5104 /* Some addresses are known to be nonzero. We don't know
5105 their sign, but equality comparisons are known. */
5106 if (nonzero_address_p (trueop0
))
5108 if (code
== EQ
|| code
== LEU
)
5110 if (code
== NE
|| code
== GTU
)
5111 return const_true_rtx
;
5114 /* See if the first operand is an IOR with a constant. If so, we
5115 may be able to determine the result of this comparison. */
5116 if (GET_CODE (op0
) == IOR
)
5118 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5119 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5121 int sign_bitnum
= GET_MODE_PRECISION (mode
) - 1;
5122 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5123 && (UINTVAL (inner_const
)
5124 & ((unsigned HOST_WIDE_INT
) 1
5134 return const_true_rtx
;
5138 return const_true_rtx
;
5152 /* Optimize comparison of ABS with zero. */
5153 if (trueop1
== CONST0_RTX (mode
)
5154 && (GET_CODE (trueop0
) == ABS
5155 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5156 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5161 /* Optimize abs(x) < 0.0. */
5162 if (!HONOR_SNANS (mode
)
5163 && (!INTEGRAL_MODE_P (mode
)
5164 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5166 if (INTEGRAL_MODE_P (mode
)
5167 && (issue_strict_overflow_warning
5168 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5169 warning (OPT_Wstrict_overflow
,
5170 ("assuming signed overflow does not occur when "
5171 "assuming abs (x) < 0 is false"));
5177 /* Optimize abs(x) >= 0.0. */
5178 if (!HONOR_NANS (mode
)
5179 && (!INTEGRAL_MODE_P (mode
)
5180 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5182 if (INTEGRAL_MODE_P (mode
)
5183 && (issue_strict_overflow_warning
5184 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5185 warning (OPT_Wstrict_overflow
,
5186 ("assuming signed overflow does not occur when "
5187 "assuming abs (x) >= 0 is true"));
5188 return const_true_rtx
;
5193 /* Optimize ! (abs(x) < 0.0). */
5194 return const_true_rtx
;
5204 /* Simplify CODE, an operation with result mode MODE and three operands,
5205 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5206 a constant. Return 0 if no simplifications is possible. */
5209 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
5210 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
5213 unsigned int width
= GET_MODE_PRECISION (mode
);
5214 bool any_change
= false;
5217 /* VOIDmode means "infinite" precision. */
5219 width
= HOST_BITS_PER_WIDE_INT
;
5224 /* Simplify negations around the multiplication. */
5225 /* -a * -b + c => a * b + c. */
5226 if (GET_CODE (op0
) == NEG
)
5228 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5230 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5232 else if (GET_CODE (op1
) == NEG
)
5234 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5236 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5239 /* Canonicalize the two multiplication operands. */
5240 /* a * -b + c => -b * a + c. */
5241 if (swap_commutative_operands_p (op0
, op1
))
5242 tem
= op0
, op0
= op1
, op1
= tem
, any_change
= true;
5245 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5250 if (CONST_INT_P (op0
)
5251 && CONST_INT_P (op1
)
5252 && CONST_INT_P (op2
)
5253 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
5254 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
5256 /* Extracting a bit-field from a constant */
5257 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5258 HOST_WIDE_INT op1val
= INTVAL (op1
);
5259 HOST_WIDE_INT op2val
= INTVAL (op2
);
5260 if (BITS_BIG_ENDIAN
)
5261 val
>>= GET_MODE_PRECISION (op0_mode
) - op2val
- op1val
;
5265 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5267 /* First zero-extend. */
5268 val
&= ((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1;
5269 /* If desired, propagate sign bit. */
5270 if (code
== SIGN_EXTRACT
5271 && (val
& ((unsigned HOST_WIDE_INT
) 1 << (op1val
- 1)))
5273 val
|= ~ (((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1);
5276 return gen_int_mode (val
, mode
);
5281 if (CONST_INT_P (op0
))
5282 return op0
!= const0_rtx
? op1
: op2
;
5284 /* Convert c ? a : a into "a". */
5285 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5288 /* Convert a != b ? a : b into "a". */
5289 if (GET_CODE (op0
) == NE
5290 && ! side_effects_p (op0
)
5291 && ! HONOR_NANS (mode
)
5292 && ! HONOR_SIGNED_ZEROS (mode
)
5293 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5294 && rtx_equal_p (XEXP (op0
, 1), op2
))
5295 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5296 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5299 /* Convert a == b ? a : b into "b". */
5300 if (GET_CODE (op0
) == EQ
5301 && ! side_effects_p (op0
)
5302 && ! HONOR_NANS (mode
)
5303 && ! HONOR_SIGNED_ZEROS (mode
)
5304 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5305 && rtx_equal_p (XEXP (op0
, 1), op2
))
5306 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5307 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5310 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5312 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5313 ? GET_MODE (XEXP (op0
, 1))
5314 : GET_MODE (XEXP (op0
, 0)));
5317 /* Look for happy constants in op1 and op2. */
5318 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5320 HOST_WIDE_INT t
= INTVAL (op1
);
5321 HOST_WIDE_INT f
= INTVAL (op2
);
5323 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5324 code
= GET_CODE (op0
);
5325 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5328 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
5336 return simplify_gen_relational (code
, mode
, cmp_mode
,
5337 XEXP (op0
, 0), XEXP (op0
, 1));
5340 if (cmp_mode
== VOIDmode
)
5341 cmp_mode
= op0_mode
;
5342 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5343 cmp_mode
, XEXP (op0
, 0),
5346 /* See if any simplifications were possible. */
5349 if (CONST_INT_P (temp
))
5350 return temp
== const0_rtx
? op2
: op1
;
5352 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5358 gcc_assert (GET_MODE (op0
) == mode
);
5359 gcc_assert (GET_MODE (op1
) == mode
);
5360 gcc_assert (VECTOR_MODE_P (mode
));
5361 op2
= avoid_constant_pool_reference (op2
);
5362 if (CONST_INT_P (op2
))
5364 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
5365 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
5366 int mask
= (1 << n_elts
) - 1;
5368 if (!(INTVAL (op2
) & mask
))
5370 if ((INTVAL (op2
) & mask
) == mask
)
5373 op0
= avoid_constant_pool_reference (op0
);
5374 op1
= avoid_constant_pool_reference (op1
);
5375 if (GET_CODE (op0
) == CONST_VECTOR
5376 && GET_CODE (op1
) == CONST_VECTOR
)
5378 rtvec v
= rtvec_alloc (n_elts
);
5381 for (i
= 0; i
< n_elts
; i
++)
5382 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
5383 ? CONST_VECTOR_ELT (op0
, i
)
5384 : CONST_VECTOR_ELT (op1
, i
));
5385 return gen_rtx_CONST_VECTOR (mode
, v
);
5397 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5399 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5401 Works by unpacking OP into a collection of 8-bit values
5402 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5403 and then repacking them again for OUTERMODE. */
5406 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
5407 enum machine_mode innermode
, unsigned int byte
)
5409 /* We support up to 512-bit values (for V8DFmode). */
5413 value_mask
= (1 << value_bit
) - 1
5415 unsigned char value
[max_bitsize
/ value_bit
];
5424 rtvec result_v
= NULL
;
5425 enum mode_class outer_class
;
5426 enum machine_mode outer_submode
;
5428 /* Some ports misuse CCmode. */
5429 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5432 /* We have no way to represent a complex constant at the rtl level. */
5433 if (COMPLEX_MODE_P (outermode
))
5436 /* Unpack the value. */
5438 if (GET_CODE (op
) == CONST_VECTOR
)
5440 num_elem
= CONST_VECTOR_NUNITS (op
);
5441 elems
= &CONST_VECTOR_ELT (op
, 0);
5442 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
5448 elem_bitsize
= max_bitsize
;
5450 /* If this asserts, it is too complicated; reducing value_bit may help. */
5451 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5452 /* I don't know how to handle endianness of sub-units. */
5453 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5455 for (elem
= 0; elem
< num_elem
; elem
++)
5458 rtx el
= elems
[elem
];
5460 /* Vectors are kept in target memory order. (This is probably
5463 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5464 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5466 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5467 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5468 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5469 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5470 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5473 switch (GET_CODE (el
))
5477 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5479 *vp
++ = INTVAL (el
) >> i
;
5480 /* CONST_INTs are always logically sign-extended. */
5481 for (; i
< elem_bitsize
; i
+= value_bit
)
5482 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5486 if (GET_MODE (el
) == VOIDmode
)
5488 unsigned char extend
= 0;
5489 /* If this triggers, someone should have generated a
5490 CONST_INT instead. */
5491 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5493 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5494 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5495 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5498 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5502 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5504 for (; i
< elem_bitsize
; i
+= value_bit
)
5509 long tmp
[max_bitsize
/ 32];
5510 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
5512 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
5513 gcc_assert (bitsize
<= elem_bitsize
);
5514 gcc_assert (bitsize
% value_bit
== 0);
5516 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5519 /* real_to_target produces its result in words affected by
5520 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5521 and use WORDS_BIG_ENDIAN instead; see the documentation
5522 of SUBREG in rtl.texi. */
5523 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5526 if (WORDS_BIG_ENDIAN
)
5527 ibase
= bitsize
- 1 - i
;
5530 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
5533 /* It shouldn't matter what's done here, so fill it with
5535 for (; i
< elem_bitsize
; i
+= value_bit
)
5541 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5543 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5544 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5548 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5549 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5550 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5552 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5553 >> (i
- HOST_BITS_PER_WIDE_INT
);
5554 for (; i
< elem_bitsize
; i
+= value_bit
)
5564 /* Now, pick the right byte to start with. */
5565 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5566 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5567 will already have offset 0. */
5568 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5570 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5572 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5573 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5574 byte
= (subword_byte
% UNITS_PER_WORD
5575 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5578 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5579 so if it's become negative it will instead be very large.) */
5580 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5582 /* Convert from bytes to chunks of size value_bit. */
5583 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5585 /* Re-pack the value. */
5587 if (VECTOR_MODE_P (outermode
))
5589 num_elem
= GET_MODE_NUNITS (outermode
);
5590 result_v
= rtvec_alloc (num_elem
);
5591 elems
= &RTVEC_ELT (result_v
, 0);
5592 outer_submode
= GET_MODE_INNER (outermode
);
5598 outer_submode
= outermode
;
5601 outer_class
= GET_MODE_CLASS (outer_submode
);
5602 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5604 gcc_assert (elem_bitsize
% value_bit
== 0);
5605 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
5607 for (elem
= 0; elem
< num_elem
; elem
++)
5611 /* Vectors are stored in target memory order. (This is probably
5614 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5615 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5617 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5618 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5619 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5620 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5621 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5624 switch (outer_class
)
5627 case MODE_PARTIAL_INT
:
5629 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
5632 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5634 lo
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5635 for (; i
< elem_bitsize
; i
+= value_bit
)
5636 hi
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5637 << (i
- HOST_BITS_PER_WIDE_INT
);
5639 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5641 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5642 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
5643 else if (elem_bitsize
<= HOST_BITS_PER_DOUBLE_INT
)
5644 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
5651 case MODE_DECIMAL_FLOAT
:
5654 long tmp
[max_bitsize
/ 32];
5656 /* real_from_target wants its input in words affected by
5657 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5658 and use WORDS_BIG_ENDIAN instead; see the documentation
5659 of SUBREG in rtl.texi. */
5660 for (i
= 0; i
< max_bitsize
/ 32; i
++)
5662 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5665 if (WORDS_BIG_ENDIAN
)
5666 ibase
= elem_bitsize
- 1 - i
;
5669 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
5672 real_from_target (&r
, tmp
, outer_submode
);
5673 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
5685 f
.mode
= outer_submode
;
5688 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5690 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5691 for (; i
< elem_bitsize
; i
+= value_bit
)
5692 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5693 << (i
- HOST_BITS_PER_WIDE_INT
));
5695 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
5703 if (VECTOR_MODE_P (outermode
))
5704 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
5709 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5710 Return 0 if no simplifications are possible. */
5712 simplify_subreg (enum machine_mode outermode
, rtx op
,
5713 enum machine_mode innermode
, unsigned int byte
)
5715 /* Little bit of sanity checking. */
5716 gcc_assert (innermode
!= VOIDmode
);
5717 gcc_assert (outermode
!= VOIDmode
);
5718 gcc_assert (innermode
!= BLKmode
);
5719 gcc_assert (outermode
!= BLKmode
);
5721 gcc_assert (GET_MODE (op
) == innermode
5722 || GET_MODE (op
) == VOIDmode
);
5724 if ((byte
% GET_MODE_SIZE (outermode
)) != 0)
5727 if (byte
>= GET_MODE_SIZE (innermode
))
5730 if (outermode
== innermode
&& !byte
)
5733 if (CONST_SCALAR_INT_P (op
)
5734 || CONST_DOUBLE_AS_FLOAT_P (op
)
5735 || GET_CODE (op
) == CONST_FIXED
5736 || GET_CODE (op
) == CONST_VECTOR
)
5737 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
5739 /* Changing mode twice with SUBREG => just change it once,
5740 or not at all if changing back op starting mode. */
5741 if (GET_CODE (op
) == SUBREG
)
5743 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
5744 int final_offset
= byte
+ SUBREG_BYTE (op
);
5747 if (outermode
== innermostmode
5748 && byte
== 0 && SUBREG_BYTE (op
) == 0)
5749 return SUBREG_REG (op
);
5751 /* The SUBREG_BYTE represents offset, as if the value were stored
5752 in memory. Irritating exception is paradoxical subreg, where
5753 we define SUBREG_BYTE to be 0. On big endian machines, this
5754 value should be negative. For a moment, undo this exception. */
5755 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5757 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
5758 if (WORDS_BIG_ENDIAN
)
5759 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5760 if (BYTES_BIG_ENDIAN
)
5761 final_offset
+= difference
% UNITS_PER_WORD
;
5763 if (SUBREG_BYTE (op
) == 0
5764 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
5766 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
5767 if (WORDS_BIG_ENDIAN
)
5768 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5769 if (BYTES_BIG_ENDIAN
)
5770 final_offset
+= difference
% UNITS_PER_WORD
;
5773 /* See whether resulting subreg will be paradoxical. */
5774 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5776 /* In nonparadoxical subregs we can't handle negative offsets. */
5777 if (final_offset
< 0)
5779 /* Bail out in case resulting subreg would be incorrect. */
5780 if (final_offset
% GET_MODE_SIZE (outermode
)
5781 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5787 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
5789 /* In paradoxical subreg, see if we are still looking on lower part.
5790 If so, our SUBREG_BYTE will be 0. */
5791 if (WORDS_BIG_ENDIAN
)
5792 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5793 if (BYTES_BIG_ENDIAN
)
5794 offset
+= difference
% UNITS_PER_WORD
;
5795 if (offset
== final_offset
)
5801 /* Recurse for further possible simplifications. */
5802 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5806 if (validate_subreg (outermode
, innermostmode
,
5807 SUBREG_REG (op
), final_offset
))
5809 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5810 if (SUBREG_PROMOTED_VAR_P (op
)
5811 && SUBREG_PROMOTED_UNSIGNED_P (op
) >= 0
5812 && GET_MODE_CLASS (outermode
) == MODE_INT
5813 && IN_RANGE (GET_MODE_SIZE (outermode
),
5814 GET_MODE_SIZE (innermode
),
5815 GET_MODE_SIZE (innermostmode
))
5816 && subreg_lowpart_p (newx
))
5818 SUBREG_PROMOTED_VAR_P (newx
) = 1;
5819 SUBREG_PROMOTED_UNSIGNED_SET
5820 (newx
, SUBREG_PROMOTED_UNSIGNED_P (op
));
5827 /* SUBREG of a hard register => just change the register number
5828 and/or mode. If the hard register is not valid in that mode,
5829 suppress this simplification. If the hard register is the stack,
5830 frame, or argument pointer, leave this as a SUBREG. */
5832 if (REG_P (op
) && HARD_REGISTER_P (op
))
5834 unsigned int regno
, final_regno
;
5837 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
5838 if (HARD_REGISTER_NUM_P (final_regno
))
5841 int final_offset
= byte
;
5843 /* Adjust offset for paradoxical subregs. */
5845 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5847 int difference
= (GET_MODE_SIZE (innermode
)
5848 - GET_MODE_SIZE (outermode
));
5849 if (WORDS_BIG_ENDIAN
)
5850 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5851 if (BYTES_BIG_ENDIAN
)
5852 final_offset
+= difference
% UNITS_PER_WORD
;
5855 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
5857 /* Propagate original regno. We don't have any way to specify
5858 the offset inside original regno, so do so only for lowpart.
5859 The information is used only by alias analysis that can not
5860 grog partial register anyway. */
5862 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
5863 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
5868 /* If we have a SUBREG of a register that we are replacing and we are
5869 replacing it with a MEM, make a new MEM and try replacing the
5870 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5871 or if we would be widening it. */
5874 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
5875 /* Allow splitting of volatile memory references in case we don't
5876 have instruction to move the whole thing. */
5877 && (! MEM_VOLATILE_P (op
)
5878 || ! have_insn_for (SET
, innermode
))
5879 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
5880 return adjust_address_nv (op
, outermode
, byte
);
5882 /* Handle complex values represented as CONCAT
5883 of real and imaginary part. */
5884 if (GET_CODE (op
) == CONCAT
)
5886 unsigned int part_size
, final_offset
;
5889 part_size
= GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)));
5890 if (byte
< part_size
)
5892 part
= XEXP (op
, 0);
5893 final_offset
= byte
;
5897 part
= XEXP (op
, 1);
5898 final_offset
= byte
- part_size
;
5901 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
5904 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
5907 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
5908 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
5912 /* A SUBREG resulting from a zero extension may fold to zero if
5913 it extracts higher bits that the ZERO_EXTEND's source bits. */
5914 if (GET_CODE (op
) == ZERO_EXTEND
&& SCALAR_INT_MODE_P (innermode
))
5916 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
5917 if (bitpos
>= GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0))))
5918 return CONST0_RTX (outermode
);
5921 if (SCALAR_INT_MODE_P (outermode
)
5922 && SCALAR_INT_MODE_P (innermode
)
5923 && GET_MODE_PRECISION (outermode
) < GET_MODE_PRECISION (innermode
)
5924 && byte
== subreg_lowpart_offset (outermode
, innermode
))
5926 rtx tem
= simplify_truncation (outermode
, op
, innermode
);
5934 /* Make a SUBREG operation or equivalent if it folds. */
5937 simplify_gen_subreg (enum machine_mode outermode
, rtx op
,
5938 enum machine_mode innermode
, unsigned int byte
)
5942 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
5946 if (GET_CODE (op
) == SUBREG
5947 || GET_CODE (op
) == CONCAT
5948 || GET_MODE (op
) == VOIDmode
)
5951 if (validate_subreg (outermode
, innermode
, op
, byte
))
5952 return gen_rtx_SUBREG (outermode
, op
, byte
);
5957 /* Simplify X, an rtx expression.
5959 Return the simplified expression or NULL if no simplifications
5962 This is the preferred entry point into the simplification routines;
5963 however, we still allow passes to call the more specific routines.
5965 Right now GCC has three (yes, three) major bodies of RTL simplification
5966 code that need to be unified.
5968 1. fold_rtx in cse.c. This code uses various CSE specific
5969 information to aid in RTL simplification.
5971 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5972 it uses combine specific information to aid in RTL
5975 3. The routines in this file.
5978 Long term we want to only have one body of simplification code; to
5979 get to that state I recommend the following steps:
5981 1. Pour over fold_rtx & simplify_rtx and move any simplifications
5982 which are not pass dependent state into these routines.
5984 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5985 use this routine whenever possible.
5987 3. Allow for pass dependent state to be provided to these
5988 routines and add simplifications based on the pass dependent
5989 state. Remove code from cse.c & combine.c that becomes
5992 It will take time, but ultimately the compiler will be easier to
5993 maintain and improve. It's totally silly that when we add a
5994 simplification that it needs to be added to 4 places (3 for RTL
5995 simplification and 1 for tree simplification. */
5998 simplify_rtx (const_rtx x
)
6000 const enum rtx_code code
= GET_CODE (x
);
6001 const enum machine_mode mode
= GET_MODE (x
);
6003 switch (GET_RTX_CLASS (code
))
6006 return simplify_unary_operation (code
, mode
,
6007 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
6008 case RTX_COMM_ARITH
:
6009 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
6010 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
6012 /* Fall through.... */
6015 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
6018 case RTX_BITFIELD_OPS
:
6019 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
6020 XEXP (x
, 0), XEXP (x
, 1),
6024 case RTX_COMM_COMPARE
:
6025 return simplify_relational_operation (code
, mode
,
6026 ((GET_MODE (XEXP (x
, 0))
6028 ? GET_MODE (XEXP (x
, 0))
6029 : GET_MODE (XEXP (x
, 1))),
6035 return simplify_subreg (mode
, SUBREG_REG (x
),
6036 GET_MODE (SUBREG_REG (x
)),
6043 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6044 if (GET_CODE (XEXP (x
, 0)) == HIGH
6045 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))