/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
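/* For example, if LOW, viewed as a HOST_WIDE_INT, has its sign bit set,
   HWI_SIGN_EXTEND (LOW) is (HOST_WIDE_INT) -1, the high half of the
   sign-extended (low, high) pair; otherwise it is 0.  */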
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
	return false;
      for (i = 0; i < elts - 1; i++)
	if (CONST_WIDE_INT_ELT (x, i) != 0)
	  return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
	width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
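/* For example, in SImode the only accepted value is the constant with
   bit 31 set and all lower bits clear, i.e. (const_int -2147483648),
   which masks to 0x80000000.  */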
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
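/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds
   to X, while a combination that does not fold, such as two distinct
   registers, is returned as (plus:SI reg1 reg2) with any constant
   operand placed second.  */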
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
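/* For example, a (mem:SF (symbol_ref)) whose address points at a
   constant-pool entry holding the value 1.0 is replaced by the
   corresponding CONST_DOUBLE, which later simplifications can fold.  */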
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
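/* For example, with X = (plus:SI (reg:SI 1) (const_int 4)), replacing
   (reg:SI 1) by (const_int 8) gives (plus:SI (const_int 8) (const_int 4)),
   which simplify_gen_binary immediately folds to (const_int 12).  */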
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (enum machine_mode mode, rtx op,
		     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
#ifdef WORD_REGISTER_OPERATIONS
      && precision >= BITS_PER_WORD
#endif
      && (GET_CODE (op) == PLUS
	  || GET_CODE (op) == MINUS
	  || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
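/* For example, simplify_truncation (QImode, (zero_extend:SI (reg:QI r)),
   SImode) returns (reg:QI r): the truncation exactly undoes the
   extension, the first case handled above.  */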
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;
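      /* Examples of the NOT folds above: (not:SI (not:SI x)) becomes x,
	 (not:SI (plus:SI x (const_int -1))) becomes (neg:SI x), and De
	 Morgan turns (not:SI (and:SI x y)) into
	 (ior:SI (not:SI x) (not:SI y)).  */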
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
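      /* Examples of the NEG folds above: for integer modes,
	 (neg:SI (minus:SI x y)) becomes (minus:SI y x) and
	 (neg:SI (mult:SI x y)) becomes (mult:SI x (neg:SI y)).  */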
    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_SIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_PRECISION (mode)
		      > GET_MODE_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
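/* Examples of the extension folds above:
   (zero_extend:DI (zero_extend:SI (reg:QI r))) becomes
   (zero_extend:DI (reg:QI r)), and an extension of a widening multiply
   is rewritten as a single wider widening multiply when the operand
   bits show that overflow was impossible.  */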
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
	{
	  /* CONST_INT have VOIDmode as the mode.  We assume that all
	     the bits of the constant are significant, though, this is
	     a dangerous assumption as many times CONST_INTs are
	     created and used with garbage in the bits outside of the
	     precision of the implied mode of the const_int.  */
	  op_mode = MAX_MODE_INT;
	}

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if you added this to the test
	 above, the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
	{
	case NOT:
	  result = wi::bit_not (op0);
	  break;

	case NEG:
	  result = wi::neg (op0);
	  break;

	case ABS:
	  result = wi::abs (op0);
	  break;

	case FFS:
	  result = wi::shwi (wi::ffs (op0), mode);
	  break;

	case CLZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::clz (op0);
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case CLRSB:
	  result = wi::shwi (wi::clrsb (op0), mode);
	  break;

	case CTZ:
	  if (wi::ne_p (op0, 0))
	    int_value = wi::ctz (op0);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
	    int_value = GET_MODE_PRECISION (mode);
	  result = wi::shwi (int_value, mode);
	  break;

	case POPCOUNT:
	  result = wi::shwi (wi::popcount (op0), mode);
	  break;

	case PARITY:
	  result = wi::shwi (wi::parity (op0), mode);
	  break;

	case BSWAP:
	  result = wide_int (op0).bswap ();
	  break;

	case TRUNCATE:
	case ZERO_EXTEND:
	  result = wide_int::from (op0, width, UNSIGNED);
	  break;

	case SIGN_EXTEND:
	  result = wide_int::from (op0, width, SIGNED);
	  break;

	case SQRT:
	default:
	  return 0;
	}

      return immed_wide_int_const (result, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
	 things before making this call.  */
      bool fail;

      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  wmax = wi::max_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmax, SIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  /* Test against the signed lower bound.  */
	  wmin = wi::min_value (width, SIGNED);
	  real_from_integer (&t, VOIDmode, wmin, SIGNED);
	  if (REAL_VALUES_LESS (x, t))
	    return immed_wide_int_const (wmin, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  wmax = wi::max_value (width, UNSIGNED);
	  real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
	  if (REAL_VALUES_LESS (t, x))
	    return immed_wide_int_const (wmax, mode);

	  return immed_wide_int_const (real_to_integer (&x, &fail, width),
				       mode);

	default:
	  gcc_unreachable ();
	}
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
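/* For example, (and:SI (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))): the constant is
   byte-swapped once so that only a single bswap remains.  */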
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
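/* For example, (plus:SI (plus:SI x (const_int 1)) (const_int 2)) is
   reassociated: the "(a op b) op c" -> "a op (b op c)" attempt folds
   (plus 1 2) to 3, yielding (plus:SI x (const_int 3)).  */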
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
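/* For example, simplify_binary_operation (MULT, SImode, GEN_INT (6),
   GEN_INT (7)) is folded by simplify_const_binary_operation to
   (const_int 42); cases that do not fold to a constant fall through to
   simplify_binary_operation_1 below.  */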
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;
      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
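      /* (IEEE defines b - a as b + (-a), and IEEE addition is commutative,
         so negating an operand and adding is exactly a subtraction; no
         rounding behavior changes.)  */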
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */
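      /* For example, (plus (symbol_ref X) (const_int 8)) can be folded to a
         single symbol-plus-offset CONST, which assemblers accept; the sum
         of two symbols generally cannot be expressed that way.  */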
      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */
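      /* e.g. (plus (mult x 3) x) becomes (mult x 4), and
         (plus (ashift x 2) x) becomes (mult x 5).  */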
      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = std::make_pair (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = std::make_pair (XEXP (rhs, 1), mode);
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                            GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + coeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
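      /* Adding the sign bit modulo 2^precision only flips the top bit, so
         it is the same as XORing with it; e.g. in QImode,
         (plus (xor x 0x90) 0x80) is (xor x 0x10).  */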
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          rtx lhs = op0, rhs = op1;

          wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
          wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = std::make_pair (XEXP (lhs, 1), mode);
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
            {
              coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
                                            GET_MODE_PRECISION (mode));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
            {
              negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
                                               GET_MODE_PRECISION (mode));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              bool speed = optimize_function_for_speed_p (cfun);

              coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
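      /* Every bit set in (x & y) is also set in x, so the subtraction
         never borrows; it just clears those bits, leaving (x & ~y).  */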
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;
      /* Convert multiply by constant power of two into shift.  */
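      /* e.g. x * 8 becomes x << 3.  */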
      if (CONST_SCALAR_INT_P (trueop1))
        {
          val = wi::exact_log2 (std::make_pair (trueop1, mode));
          if (val >= 0)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
        }
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode)
          && trueop1 == CONSTM1_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */
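      /* e.g. in SImode, (ior (ashift a 24) (lshiftrt a 8)) becomes
         (rotate a 24), since 24 + 8 equals the 32-bit precision.  */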
      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        {
          rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (UINTVAL (XEXP (op0, 1))
                                                       & ~UINTVAL (op1),
                                                       mode));
          return simplify_gen_binary (IOR, mode, tmp, op1);
        }

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && trunc_int_for_mode (mask, mode) == mask
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
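      /* As with the PLUS case above: adding the sign bit modulo
         2^precision and XORing with it are the same operation, and PLUS
         is the canonical form.  */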
      if (CONST_SCALAR_INT_P (op1)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == PLUS
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode,
                                                          op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
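      /* e.g. in QImode, (xor (and a 0x0f) 0xff) has ~C & B == 0 and
         becomes (ior (not a) 0xf0), which is ~(a & 0x0f).  */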
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          rtx na_c
            = simplify_binary_operation (AND, mode,
                                         simplify_gen_unary (NOT, mode,
                                                             a, mode),
                                         c);
          if ((~cval & bval) == 0)
            {
              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            gen_int_mode (~bval & cval,
                                                          mode));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (na_c == const0_rtx)
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    gen_int_mode (~cval & bval,
                                                                  mode));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              gen_int_mode (~bval & cval,
                                                            mode));
                }
            }
        }
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;
      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */
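      /* e.g. in SImode, (lshiftrt x 31) extracts the sign bit, so XORing
         it with 1 yields 1 exactly when x >= 0, i.e. (ge x 0).  */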
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          enum machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0),
                                                           op1),
                                      gen_int_mode (tmp, mode));
        }
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
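      /* e.g. with M == 0x0f, ((a | 0x30) + b) & 0x0f -> (a + b) & 0x0f:
         bits of N above the mask cannot influence the masked low bits,
         because carries only propagate upward.  */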
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
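      /* e.g. unsigned x / 8 becomes x >> 3 (LSHIFTRT).  */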
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
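      /* e.g. unsigned x % 16 becomes x & 15.  */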
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
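      /* e.g. in SImode, (rotate a 31) becomes (rotatert a 1).  */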
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
                       GET_MODE_PRECISION (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0,
                                    GEN_INT (GET_MODE_PRECISION (mode)
                                             - INTVAL (trueop1)));
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract a scalar element from a vector using a chain of
             nested VEC_SELECT expressions.  When the input operand is a
             memory operand, this operation can be simplified to a simple
             scalar load from an offsetted memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out the number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }

          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }

          /* Recognize the identity.  */
          if (GET_MODE (trueop0) == mode)
            {
              bool maybe_ident = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (!CONST_INT_P (j) || INTVAL (j) != i)
                    {
                      maybe_ident = false;
                      break;
                    }
                }
              if (maybe_ident)
                return trueop0;
            }
          /* If we build {a,b} then permute it, build the result directly.  */
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }
          /* If we select one half of a vec_concat, return that.  */
          if (GET_CODE (trueop0) == VEC_CONCAT
              && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
            {
              rtx subop0 = XEXP (trueop0, 0);
              rtx subop1 = XEXP (trueop0, 1);
              enum machine_mode mode0 = GET_MODE (subop0);
              enum machine_mode mode1 = GET_MODE (subop1);
              int li = GET_MODE_SIZE (GET_MODE_INNER (mode0));
              int l0 = GET_MODE_SIZE (mode0) / li;
              int l1 = GET_MODE_SIZE (mode1) / li;
              int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
                {
                  bool success = true;
                  for (int i = 1; i < l0; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop0;
                }
              if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
                {
                  bool success = true;
                  for (int i = 1; i < l1; ++i)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
                        {
                          success = false;
                          break;
                        }
                    }
                  if (success)
                    return subop1;
                }
            }
        }
, 0) == 1
3424 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3425 && GET_CODE (trueop0
) == VEC_CONCAT
)
3428 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3430 /* Try to find the element in the VEC_CONCAT. */
3431 while (GET_MODE (vec
) != mode
3432 && GET_CODE (vec
) == VEC_CONCAT
)
3434 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3435 if (offset
< vec_size
)
3436 vec
= XEXP (vec
, 0);
3440 vec
= XEXP (vec
, 1);
3442 vec
= avoid_constant_pool_reference (vec
);
3445 if (GET_MODE (vec
) == mode
)
      /* If we select elements in a vec_merge that all come from the same
         operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
        {
          rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
          if (CONST_INT_P (trueop02))
            {
              unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
              bool all_operand0 = true;
              bool all_operand1 = true;
              for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                {
                  rtx j = XVECEXP (trueop1, 0, i);
                  if (sel & (1 << UINTVAL (j)))
                    all_operand1 = false;
                  else
                    all_operand0 = false;
                }
              if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 0), op1);
              if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                return simplify_gen_binary (VEC_SELECT, mode,
                                            XEXP (op0, 1), op1);
            }
        }
      /* If we have two nested selects that are inverses of each
         other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
          && GET_MODE (XEXP (trueop0, 0)) == mode)
        {
          rtx op0_subop1 = XEXP (trueop0, 1);
          gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

          /* Apply the outer ordering vector to the inner one.  (The inner
             ordering vector is expressly permitted to be of a different
             length than the outer one.)  If the result is { 0, 1, ..., n-1 }
             then the two VEC_SELECTs cancel.  */
          for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
            {
              rtx x = XVECEXP (trueop1, 0, i);
              if (!CONST_INT_P (x))
                return 0;
              rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
              if (!CONST_INT_P (y) || i != INTVAL (y))
                return 0;
            }
          return XEXP (trueop0, 0);
        }

      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
        /* Try to merge two VEC_SELECTs from the same vector into a single one.
           Restrict the transformation to avoid generating a VEC_SELECT with a
           mode unrelated to its operand.  */
        if (GET_CODE (trueop0) == VEC_SELECT
            && GET_CODE (trueop1) == VEC_SELECT
            && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
            && GET_MODE (XEXP (trueop0, 0)) == mode)
          {
            rtx par0 = XEXP (trueop0, 1);
            rtx par1 = XEXP (trueop1, 1);
            int len0 = XVECLEN (par0, 0);
            int len1 = XVECLEN (par1, 0);
            rtvec vec = rtvec_alloc (len0 + len1);
            for (int i = 0; i < len0; i++)
              RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
            for (int i = 0; i < len1; i++)
              RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
            return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
                                        gen_rtx_PARALLEL (VOIDmode, vec));
          }
      }
      return 0;

    default:
      break;
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
          || GET_CODE (op0) == CONST_FIXED
          || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
          || CONST_DOUBLE_AS_FLOAT_P (op1)
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return 0;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE, but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test above,
         the code would die later anyway.  If this assert happens, you
         just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
= wi::sub (pop0
, pop1
);
3814 result
= wi::add (pop0
, pop1
);
3818 result
= wi::mul (pop0
, pop1
);
3822 result
= wi::div_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3828 result
= wi::mod_trunc (pop0
, pop1
, SIGNED
, &overflow
);
3834 result
= wi::div_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3840 result
= wi::mod_trunc (pop0
, pop1
, UNSIGNED
, &overflow
);
3846 result
= wi::bit_and (pop0
, pop1
);
3850 result
= wi::bit_or (pop0
, pop1
);
3854 result
= wi::bit_xor (pop0
, pop1
);
3858 result
= wi::smin (pop0
, pop1
);
3862 result
= wi::smax (pop0
, pop1
);
3866 result
= wi::umin (pop0
, pop1
);
3870 result
= wi::umax (pop0
, pop1
);
3877 wide_int wop1
= pop1
;
3878 if (SHIFT_COUNT_TRUNCATED
)
3879 wop1
= wi::umod_trunc (wop1
, width
);
3880 else if (wi::geu_p (wop1
, width
))
3886 result
= wi::lrshift (pop0
, wop1
);
3890 result
= wi::arshift (pop0
, wop1
);
3894 result
= wi::lshift (pop0
, wop1
);
3905 if (wi::neg_p (pop1
))
3911 result
= wi::lrotate (pop0
, pop1
);
3915 result
= wi::rrotate (pop0
, pop1
);
3926 return immed_wide_int_const (result
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
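/* For example, (minus (plus a b) (plus a c)) expands to the signed
   multiset {a, b, -a, -c}; a and -a cancel in the pairwise combination
   loop, and the result is rebuilt as (minus b c).  */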
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == ARRAY_SIZE (ops))
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              changed = 1;
              canonicalized |= this_neg;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops != ARRAY_SIZE (ops)
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != ARRAY_SIZE (ops))
                {
                  ops[n_ops].op = CONSTM1_RTX (mode);
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs,
                                                     tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                if (tem)
                  {
                    /* Reject "simplifications" that just wrap the two
                       arguments in a CONST.  Failure to do so can result
                       in infinite recursion with simplify_binary_operation
                       when it calls us to simplify CONST operations.
                       Also, if we find such a simplification, don't try
                       any more combinations with this rhs:  We must have
                       something like symbol+offset, i.e. one of the
                       trivial CONST expressions we handle later.  */
                    if (GET_CODE (tem) == CONST
                        && GET_CODE (XEXP (tem, 0)) == ncode
                        && XEXP (XEXP (tem, 0), 0) == lhs
                        && XEXP (XEXP (tem, 0), 1) == rhs)
                      break;

                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }
      /* If nothing changed, fail.  */
      if (!canonicalized)
        return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
                                         INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
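/* For example (an illustrative note, not from the original sources): each
   of (plus (reg X) (reg Y)), (minus (reg X) (const_int 1)) and
   (const:SI (plus:SI (symbol_ref:SI "foo") (const_int 4))) satisfies this
   predicate, so callers such as simplify_binary_operation can route such
   operands through simplify_plus_minus.  */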
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }
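  /* A concrete instance of the rule above (illustrative): on a 32-bit
     target, (ltu:SI (plus:SI (reg a) (const_int 7)) (const_int 7)) asks
     whether a + 7 wrapped around, which happens exactly when a is at
     least -7 when viewed as an unsigned number, so the test becomes
     (geu:SI (reg a) (const_int -7)).  */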
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
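  /* The canonicalizations above are plain unsigned/signed range facts; for
     example (ltu x 1) holds exactly when x == 0, since the only unsigned
     value below 1 is 0, and (geu x 1) is its negation, (ne x 0).  */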
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
         simplification case between:
            A + B == C  <--->  C - B == A,
         where A, B, and C are all constants with non-simplifiable expressions,
         usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
          && CONSTANT_P (x)
          && rtx_equal_p (c, XEXP (tem, 1)))
        return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
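  /* For example (illustrative): (eq (plus (reg x) (const_int 3))
     (const_int 10)) becomes (eq (reg x) (const_int 7)).  The guard above
     matters when the constants are symbolic: for (eq (plus A B) C) with A,
     B and C SYMBOL_REFs, C - B does not fold, and without the check the
     pass would oscillate between "A + B == C" and "C - B == A" forever.  */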
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
                                    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_unary (BSWAP, cmp_mode,
                                                        op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */
rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
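  /* Example of the subtraction trick above (illustrative): for
     (eq:SI (plus:SI (reg x) (const_int 4)) (reg x)), neither side is a
     plain REG or CONST_INT, and op0 - op1 folds to (const_int 4); the
     recursive call then decides (eq (const_int 4) (const_int 0)) and
     yields const0_rtx.  */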
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
         largest int representable on the target is as good as
         infinite.  */
      enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
          cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
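  /* Worked instance (illustrative): comparing (const_int -1) with
     (const_int 1) in "infinite precision" gives wi::lts_p true (signed
     -1 < 1) but wi::ltu_p false (unsigned ~0 > 1), so cr becomes
     CMP_LT | CMP_GTU; LT then folds to const_true_rtx while LTU folds
     to const0_rtx.  */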
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
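  /* Example of the bound tests (illustrative): in QImode, where a signed
     value ranges over [-128, 127], (gt:SI (reg:QI x) (const_int 127))
     folds to const0_rtx and (le:SI (reg:QI x) (const_int 127)) folds to
     const_true_rtx, with no knowledge of x beyond its mode.  */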
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications are possible.  */
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
      break;
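    /* Worked example (illustrative, assuming !BITS_BIG_ENDIAN):
       extracting 4 bits at position 4 from 0xab first shifts val right
       by op2val, 0xab >> 4 = 0xa, then masks to op1val bits.
       ZERO_EXTRACT yields (const_int 10), while SIGN_EXTRACT sees bit 3
       set in 0xa (binary 1010), smears it, and yields (const_int -6).  */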
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
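    /* Example of the "happy constants" case above (illustrative,
       assuming STORE_FLAG_VALUE == 1): (if_then_else (lt (reg a) (reg b))
       (const_int 1) (const_int 0)) collapses to the comparison itself,
       (lt (reg a) (reg b)); with the arms swapped, the reversed
       comparison (ge (reg a) (reg b)) is generated instead.  */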
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
          unsigned HOST_WIDE_INT mask;
          if (n_elts == HOST_BITS_PER_WIDE_INT)
            mask = -1;
          else
            mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

          if (!(sel & mask) && !side_effects_p (op0))
            return op1;
          if ((sel & mask) == mask && !side_effects_p (op1))
            return op0;

          rtx trueop0 = avoid_constant_pool_reference (op0);
          rtx trueop1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (trueop0) == CONST_VECTOR
              && GET_CODE (trueop1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
                                    ? CONST_VECTOR_ELT (trueop0, i)
                                    : CONST_VECTOR_ELT (trueop1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
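          /* Example (illustrative): for a four-element vector, sel = 5
             (binary 0101) selects elements 0 and 2 from op0 and elements
             1 and 3 from op1, so merging constant vectors {a0,a1,a2,a3}
             and {b0,b1,b2,b3} yields {a0,b1,a2,b3}.  */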
          /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
             if no element from a appears in the result.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
                  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 1), op1, op2);
                  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 XEXP (op0, 0), op1, op2);
                }
            }
          if (GET_CODE (op1) == VEC_MERGE)
            {
              tem = avoid_constant_pool_reference (XEXP (op1, 2));
              if (CONST_INT_P (tem))
                {
                  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
                  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 1), op2);
                  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
                    return simplify_gen_ternary (code, mode, mode,
                                                 op0, XEXP (op1, 0), op2);
                }
            }
        }

      if (rtx_equal_p (op0, op1)
          && !side_effects_p (op2) && !side_effects_p (op1))
        return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by
   BYTE, and then repacking them again for OUTERMODE.  */
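/* For instance (an illustrative sketch; byte order depends on the target):
   on a little-endian target, (subreg:SI (const_int 0x0123456789abcdef) 4)
   with DImode inner mode unpacks the value into the byte array
   ef cd ab 89 67 45 23 01, selects the four bytes starting at offset 4,
   and repacks them as (const_int 0x01234567).  */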
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
                     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_WIDE_INT:
          {
            rtx_mode_t val = std::make_pair (el, innermode);
            unsigned char extend = wi::sign_mask (val);

            for (i = 0; i < elem_bitsize; i += value_bit)
              *vp++ = wi::extract_uhwi (val, i, value_bit);
            for (; i < elem_bitsize; i += value_bit)
              *vp++ = extend;
          }
          break;

        case CONST_DOUBLE:
          if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              /* This is big enough for anything on the platform.  */
              long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            int u;
            int base = 0;
            int units
              = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
                / HOST_BITS_PER_WIDE_INT;
            HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
            wide_int r;

            for (u = 0; u < units; u++)
              {
                unsigned HOST_WIDE_INT buf = 0;
                for (i = 0;
                     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
                     i += value_bit)
                  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

                tmp[u] = buf;
                base += HOST_BITS_PER_WIDE_INT;
              }
            gcc_assert (GET_MODE_PRECISION (outer_submode)
                        <= MAX_BITSIZE_MODE_ANY_INT);
            r = wide_int::from_array (tmp, units,
                                      GET_MODE_PRECISION (outer_submode));
            elems[elem] = immed_wide_int_const (r, outer_submode);
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_SIGN (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
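  /* Example of the double-SUBREG collapse above (illustrative): on a
     little-endian target, (subreg:QI (subreg:HI (reg:SI r) 0) 0) recurses
     with the combined offset 0 and becomes (subreg:QI (reg:SI r) 0).  */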
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
        return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to four places (three for
   RTL simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }

  return NULL;
}