/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
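
/* Illustrative note (not part of the original source; assumes a 64-bit
   HOST_WIDE_INT): HWI_SIGN_EXTEND (0x8000000000000000) evaluates to -1,
   while HWI_SIGN_EXTEND (0x1) evaluates to 0 -- i.e. it produces the high
   half that a (low, high) pair would need if LOW were treated as signed.  */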

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
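
/* Illustrative example (not from the original source): in QImode,
   negating (const_int -128) cannot be represented as +128, so the unsigned
   negation followed by gen_int_mode's truncation wraps it back to
   (const_int -128); this is exactly the overflow case the comment above
   warns about.  */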

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
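
/* Illustrative example (not from the original source): for QImode, whose
   precision is 8, only a constant whose low eight bits are 0x80 -- e.g.
   (const_int -128) -- satisfies mode_signbit_p, since after masking with
   the mode mask the value must equal 1 << 7.  */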

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
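
/* Usage sketch (illustrative, not from the original source): building
   (plus:SI X (const_int 0)) through
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds the addition
   away and simply returns X, whereas gen_rtx_PLUS would construct the
   redundant expression verbatim.  */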

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
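
/* Illustrative example (not from the original source): given a MEM whose
   address is a SYMBOL_REF into the constant pool holding the DFmode
   constant 1.0, this returns that CONST_DOUBLE directly, so later folding
   can treat the operand as a compile-time constant rather than a load.  */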

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, reversep, volatilep = 0;

            decl
              = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
                                     &unsignedp, &reversep, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i) != NULL_RTX)
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
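
/* Usage sketch (illustrative, not from the original source):
   simplify_replace_rtx (x, reg, const0_rtx) returns a copy of X in which
   every occurrence of REG has been replaced by (const_int 0), with each
   enclosing operation re-simplified; for example
   (plus:SI (reg:SI r) (const_int 4)) collapses to (const_int 4).  */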

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
     (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
     and C2.  */
  if (GET_CODE (op) == AND
      && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
          || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
      && CONST_INT_P (XEXP (XEXP (op, 0), 1))
      && CONST_INT_P (XEXP (op, 1)))
    {
      rtx op0 = (XEXP (XEXP (op, 0), 0));
      rtx shift_op = XEXP (XEXP (op, 0), 1);
      rtx mask_op = XEXP (op, 1);
      unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
      unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);

      if (shift < precision
          /* If doing this transform works for an X with all bits set,
             it works for any X.  */
          && ((GET_MODE_MASK (mode) >> shift) & mask)
             == ((GET_MODE_MASK (op_mode) >> shift) & mask)
          && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
          && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
        {
          mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
          return simplify_gen_binary (AND, mode, op0, mask_op);
        }
    }

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
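
/* Illustrative example (not from the original source): with QImode MODE and
   SImode OP_MODE, (truncate:QI (lshiftrt:SI (sign_extend:SI (reg:QI x))
   (const_int 3))) satisfies the sign-extend rule above (2 * 8 <= 32 and
   3 < 8) and is simplified to (ashiftrt:QI (reg:QI x) (const_int 3)).  */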

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
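
/* Usage sketch (illustrative, not from the original source): a caller can
   fold (neg:SI (const_int 5)) with
   simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode), which
   returns (const_int -5); when no folding applies the function returns
   NULL_RTX and the caller keeps the original expression.  */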

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
               || (GET_CODE (false_rtx) == NEG
                   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

      /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
         (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (op, 1) != const0_rtx)
        return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematiclly
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        {
          temp
            = convert_memory_address_addr_space_1 (Pmode, op,
                                                   ADDR_SPACE_GENERIC, false,
                                                   true);
          if (temp)
            return temp;
        }
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_UNIT_SIZE (mode);
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_UNIT_SIZE (mode);
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* CONST_INT have VOIDmode as the mode.  We assume that all
             the bits of the constant are significant, though, this is
             a dangerous assumption as many times CONST_INTs are
             created and used with garbage in the bits outside of the
             precision of the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);

      /* Avoid the folding if flag_signaling_nans is on and
         operand is a signaling NaN.  */
      if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
        return 0;

      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE but a lot of
         upstream callers expect that this function never fails to
         simplify something and so you if you added this to the test
         above the code would die later anyway.  If this assert
         happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        case SQRT:
        default:
          return 0;
        }

      return immed_wide_int_const (result, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
            d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op))
              && !(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          /* Don't perform the operation if flag_signaling_nans is on
             and the operand is a signaling NaN.  */
          if (!(HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d)))
            real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (*x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (real_less (x, &t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
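/* Illustrative sketch (constants assumed, not taken from the code above):
   byte swapping commutes with bitwise operations because it only permutes
   whole bytes.  E.g. in SImode,
     (and (bswap x) (const_int 0xff))
   can be rewritten as
     (bswap (and x (const_int 0xff000000)))
   where 0xff000000 is the byte-swapped form of the constant 0xff; both
   expressions extract the most significant byte of x into the low byte.  */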
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
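/* Illustrative example (operands assumed): for integral PLUS the
   linearization above turns (a + b) + (c + d) into ((a + b) + c) + d,
   and the later "(a op b) op c" -> "a op (b op c)" attempt lets constants
   meet, e.g. (x + 1) + 2 folds to x + 3 because simplify_binary_operation
   can fold 1 + 2.  */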
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
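/* Example of why the mode matters for comparisons (hypothetical values):
   treated as full-word integers, 128 > -128, but in QImode both constants
   denote the same bit pattern 0x80 and compare equal; hence relational
   codes are rejected here and go through simplify_relational_operation,
   which knows the operand mode.  */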
2076 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2077 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2078 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2079 actual constants. */
2082 simplify_binary_operation_1 (enum rtx_code code
, machine_mode mode
,
2083 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
2085 rtx tem
, reversed
, opleft
, opright
;
2087 unsigned int width
= GET_MODE_PRECISION (mode
);
2089 /* Even if we can't compute a constant result,
2090 there are some cases worth simplifying. */
2095 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2096 when x is NaN, infinite, or finite and nonzero. They aren't
2097 when x is -0 and the rounding mode is not towards -infinity,
2098 since (-0) + 0 is then 0. */
2099 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
2102 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2103 transformations are safe even for IEEE. */
2104 if (GET_CODE (op0
) == NEG
)
2105 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2106 else if (GET_CODE (op1
) == NEG
)
2107 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2109 /* (~a) + 1 -> -a */
2110 if (INTEGRAL_MODE_P (mode
)
2111 && GET_CODE (op0
) == NOT
2112 && trueop1
== const1_rtx
)
2113 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2115 /* Handle both-operands-constant cases. We can only add
2116 CONST_INTs to constants since the sum of relocatable symbols
2117 can't be handled by most assemblers. Don't add CONST_INT
2118 to CONST_INT since overflow won't be computed properly if wider
2119 than HOST_BITS_PER_WIDE_INT. */
2121 if ((GET_CODE (op0
) == CONST
2122 || GET_CODE (op0
) == SYMBOL_REF
2123 || GET_CODE (op0
) == LABEL_REF
)
2124 && CONST_INT_P (op1
))
2125 return plus_constant (mode
, op0
, INTVAL (op1
));
2126 else if ((GET_CODE (op1
) == CONST
2127 || GET_CODE (op1
) == SYMBOL_REF
2128 || GET_CODE (op1
) == LABEL_REF
)
2129 && CONST_INT_P (op0
))
2130 return plus_constant (mode
, op1
, INTVAL (op0
));
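      /* Illustrative fold (operands assumed): (plus (symbol_ref "foo")
	 (const_int 4)) is typically turned by plus_constant into
	 (const (plus (symbol_ref "foo") (const_int 4))), which assemblers
	 can relocate; a sum of two symbol_refs is left alone for the reason
	 given in the comment above.  */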
2132 /* See if this is something like X * C - X or vice versa or
2133 if the multiplication is written as a shift. If so, we can
2134 distribute and make a new multiply, shift, or maybe just
2135 have X (if C is 2 in the example above). But don't make
2136 something more expensive than we had before. */
2138 if (SCALAR_INT_MODE_P (mode
))
2140 rtx lhs
= op0
, rhs
= op1
;
2142 wide_int coeff0
= wi::one (GET_MODE_PRECISION (mode
));
2143 wide_int coeff1
= wi::one (GET_MODE_PRECISION (mode
));
2145 if (GET_CODE (lhs
) == NEG
)
2147 coeff0
= wi::minus_one (GET_MODE_PRECISION (mode
));
2148 lhs
= XEXP (lhs
, 0);
2150 else if (GET_CODE (lhs
) == MULT
2151 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2153 coeff0
= std::make_pair (XEXP (lhs
, 1), mode
);
2154 lhs
= XEXP (lhs
, 0);
2156 else if (GET_CODE (lhs
) == ASHIFT
2157 && CONST_INT_P (XEXP (lhs
, 1))
2158 && INTVAL (XEXP (lhs
, 1)) >= 0
2159 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (mode
))
2161 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2162 GET_MODE_PRECISION (mode
));
2163 lhs
= XEXP (lhs
, 0);
2166 if (GET_CODE (rhs
) == NEG
)
2168 coeff1
= wi::minus_one (GET_MODE_PRECISION (mode
));
2169 rhs
= XEXP (rhs
, 0);
2171 else if (GET_CODE (rhs
) == MULT
2172 && CONST_INT_P (XEXP (rhs
, 1)))
2174 coeff1
= std::make_pair (XEXP (rhs
, 1), mode
);
2175 rhs
= XEXP (rhs
, 0);
2177 else if (GET_CODE (rhs
) == ASHIFT
2178 && CONST_INT_P (XEXP (rhs
, 1))
2179 && INTVAL (XEXP (rhs
, 1)) >= 0
2180 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (mode
))
2182 coeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2183 GET_MODE_PRECISION (mode
));
2184 rhs
= XEXP (rhs
, 0);
2187 if (rtx_equal_p (lhs
, rhs
))
2189 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2191 bool speed
= optimize_function_for_speed_p (cfun
);
2193 coeff
= immed_wide_int_const (coeff0
+ coeff1
, mode
);
2195 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2196 return (set_src_cost (tem
, mode
, speed
)
2197 <= set_src_cost (orig
, mode
, speed
) ? tem
: 0);
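	  /* Worked instance (values assumed for illustration): with
	     lhs == rhs == x, (plus (mult x (const_int 3)) x) gives
	     coeff0 == 3 and coeff1 == 1, so the code above builds
	     (mult x (const_int 4)); likewise (plus (ashift x 2) x) uses
	     coeff0 == 4 and folds to (mult x (const_int 5)).  The result is
	     kept only if set_src_cost says it is no more expensive than the
	     original expression.  */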
2201 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2202 if (CONST_SCALAR_INT_P (op1
)
2203 && GET_CODE (op0
) == XOR
2204 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2205 && mode_signbit_p (mode
, op1
))
2206 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2207 simplify_gen_binary (XOR
, mode
, op1
,
2210 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2211 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2212 && GET_CODE (op0
) == MULT
2213 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2217 in1
= XEXP (XEXP (op0
, 0), 0);
2218 in2
= XEXP (op0
, 1);
2219 return simplify_gen_binary (MINUS
, mode
, op1
,
2220 simplify_gen_binary (MULT
, mode
,
      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
2227 if (COMPARISON_P (op0
)
2228 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2229 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2230 && (reversed
= reversed_comparison (op0
, mode
)))
2232 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2234 /* If one of the operands is a PLUS or a MINUS, see if we can
2235 simplify this by the associative law.
2236 Don't use the associative law for floating point.
2237 The inaccuracy makes it nonassociative,
2238 and subtle programs can break if operations are associated. */
2240 if (INTEGRAL_MODE_P (mode
)
2241 && (plus_minus_operand_p (op0
)
2242 || plus_minus_operand_p (op1
))
2243 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2246 /* Reassociate floating point addition only when the user
2247 specifies associative math operations. */
2248 if (FLOAT_MODE_P (mode
)
2249 && flag_associative_math
)
2251 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2258 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2259 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2260 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2261 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2263 rtx xop00
= XEXP (op0
, 0);
2264 rtx xop10
= XEXP (op1
, 0);
2266 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2269 if (REG_P (xop00
) && REG_P (xop10
)
2270 && GET_MODE (xop00
) == GET_MODE (xop10
)
2271 && REGNO (xop00
) == REGNO (xop10
)
2272 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2273 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2279 /* We can't assume x-x is 0 even with non-IEEE floating point,
2280 but since it is zero except in very strange circumstances, we
2281 will treat it as zero with -ffinite-math-only. */
2282 if (rtx_equal_p (trueop0
, trueop1
)
2283 && ! side_effects_p (op0
)
2284 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2285 return CONST0_RTX (mode
);
2287 /* Change subtraction from zero into negation. (0 - x) is the
2288 same as -x when x is NaN, infinite, or finite and nonzero.
2289 But if the mode has signed zeros, and does not round towards
2290 -infinity, then 0 - 0 is 0, not -0. */
2291 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2292 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2294 /* (-1 - a) is ~a, unless the expression contains symbolic
2295 constants, in which case not retaining additions and
2296 subtractions could cause invalid assembly to be produced. */
2297 if (trueop0
== constm1_rtx
2298 && !contains_symbolic_reference_p (op1
))
2299 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
2304 if (!(HONOR_SIGNED_ZEROS (mode
)
2305 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2306 && trueop1
== CONST0_RTX (mode
))
2309 /* See if this is something like X * C - X or vice versa or
2310 if the multiplication is written as a shift. If so, we can
2311 distribute and make a new multiply, shift, or maybe just
2312 have X (if C is 2 in the example above). But don't make
2313 something more expensive than we had before. */
2315 if (SCALAR_INT_MODE_P (mode
))
2317 rtx lhs
= op0
, rhs
= op1
;
2319 wide_int coeff0
= wi::one (GET_MODE_PRECISION (mode
));
2320 wide_int negcoeff1
= wi::minus_one (GET_MODE_PRECISION (mode
));
2322 if (GET_CODE (lhs
) == NEG
)
2324 coeff0
= wi::minus_one (GET_MODE_PRECISION (mode
));
2325 lhs
= XEXP (lhs
, 0);
2327 else if (GET_CODE (lhs
) == MULT
2328 && CONST_SCALAR_INT_P (XEXP (lhs
, 1)))
2330 coeff0
= std::make_pair (XEXP (lhs
, 1), mode
);
2331 lhs
= XEXP (lhs
, 0);
2333 else if (GET_CODE (lhs
) == ASHIFT
2334 && CONST_INT_P (XEXP (lhs
, 1))
2335 && INTVAL (XEXP (lhs
, 1)) >= 0
2336 && INTVAL (XEXP (lhs
, 1)) < GET_MODE_PRECISION (mode
))
2338 coeff0
= wi::set_bit_in_zero (INTVAL (XEXP (lhs
, 1)),
2339 GET_MODE_PRECISION (mode
));
2340 lhs
= XEXP (lhs
, 0);
2343 if (GET_CODE (rhs
) == NEG
)
2345 negcoeff1
= wi::one (GET_MODE_PRECISION (mode
));
2346 rhs
= XEXP (rhs
, 0);
2348 else if (GET_CODE (rhs
) == MULT
2349 && CONST_INT_P (XEXP (rhs
, 1)))
2351 negcoeff1
= wi::neg (std::make_pair (XEXP (rhs
, 1), mode
));
2352 rhs
= XEXP (rhs
, 0);
2354 else if (GET_CODE (rhs
) == ASHIFT
2355 && CONST_INT_P (XEXP (rhs
, 1))
2356 && INTVAL (XEXP (rhs
, 1)) >= 0
2357 && INTVAL (XEXP (rhs
, 1)) < GET_MODE_PRECISION (mode
))
2359 negcoeff1
= wi::set_bit_in_zero (INTVAL (XEXP (rhs
, 1)),
2360 GET_MODE_PRECISION (mode
));
2361 negcoeff1
= -negcoeff1
;
2362 rhs
= XEXP (rhs
, 0);
2365 if (rtx_equal_p (lhs
, rhs
))
2367 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2369 bool speed
= optimize_function_for_speed_p (cfun
);
2371 coeff
= immed_wide_int_const (coeff0
+ negcoeff1
, mode
);
2373 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2374 return (set_src_cost (tem
, mode
, speed
)
2375 <= set_src_cost (orig
, mode
, speed
) ? tem
: 0);
2379 /* (a - (-b)) -> (a + b). True even for IEEE. */
2380 if (GET_CODE (op1
) == NEG
)
2381 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2383 /* (-x - c) may be simplified as (-c - x). */
2384 if (GET_CODE (op0
) == NEG
2385 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2387 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2389 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2392 /* Don't let a relocatable value get a negative coeff. */
2393 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2394 return simplify_gen_binary (PLUS
, mode
,
2396 neg_const_int (mode
, op1
));
2398 /* (x - (x & y)) -> (x & ~y) */
2399 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2401 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2403 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2404 GET_MODE (XEXP (op1
, 1)));
2405 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2407 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2409 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2410 GET_MODE (XEXP (op1
, 0)));
2411 return simplify_gen_binary (AND
, mode
, op0
, tem
);
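	  /* Bitwise identity behind the transformation above (illustration
	     only): every bit of (x & y) is also set in x, so the subtraction
	     never borrows and simply clears those bits, i.e.
	       x - (x & y) == x & ~y.
	     E.g. x = 0b1101, y = 0b1011: x & y = 0b1001,
	     x - 0b1001 = 0b0100 = x & ~y.  */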
2415 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2416 by reversing the comparison code if valid. */
2417 if (STORE_FLAG_VALUE
== 1
2418 && trueop0
== const1_rtx
2419 && COMPARISON_P (op1
)
2420 && (reversed
= reversed_comparison (op1
, mode
)))
2423 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2424 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2425 && GET_CODE (op1
) == MULT
2426 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2430 in1
= XEXP (XEXP (op1
, 0), 0);
2431 in2
= XEXP (op1
, 1);
2432 return simplify_gen_binary (PLUS
, mode
,
2433 simplify_gen_binary (MULT
, mode
,
2438 /* Canonicalize (minus (neg A) (mult B C)) to
2439 (minus (mult (neg B) C) A). */
2440 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2441 && GET_CODE (op1
) == MULT
2442 && GET_CODE (op0
) == NEG
)
2446 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2447 in2
= XEXP (op1
, 1);
2448 return simplify_gen_binary (MINUS
, mode
,
2449 simplify_gen_binary (MULT
, mode
,
2454 /* If one of the operands is a PLUS or a MINUS, see if we can
2455 simplify this by the associative law. This will, for example,
2456 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2457 Don't use the associative law for floating point.
2458 The inaccuracy makes it nonassociative,
2459 and subtle programs can break if operations are associated. */
2461 if (INTEGRAL_MODE_P (mode
)
2462 && (plus_minus_operand_p (op0
)
2463 || plus_minus_operand_p (op1
))
2464 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2469 if (trueop1
== constm1_rtx
)
2470 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2472 if (GET_CODE (op0
) == NEG
)
2474 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2475 /* If op1 is a MULT as well and simplify_unary_operation
2476 just moved the NEG to the second operand, simplify_gen_binary
2477 below could through simplify_associative_operation move
2478 the NEG around again and recurse endlessly. */
2480 && GET_CODE (op1
) == MULT
2481 && GET_CODE (temp
) == MULT
2482 && XEXP (op1
, 0) == XEXP (temp
, 0)
2483 && GET_CODE (XEXP (temp
, 1)) == NEG
2484 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2487 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2489 if (GET_CODE (op1
) == NEG
)
2491 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2492 /* If op0 is a MULT as well and simplify_unary_operation
2493 just moved the NEG to the second operand, simplify_gen_binary
2494 below could through simplify_associative_operation move
2495 the NEG around again and recurse endlessly. */
2497 && GET_CODE (op0
) == MULT
2498 && GET_CODE (temp
) == MULT
2499 && XEXP (op0
, 0) == XEXP (temp
, 0)
2500 && GET_CODE (XEXP (temp
, 1)) == NEG
2501 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2504 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2507 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2508 x is NaN, since x * 0 is then also NaN. Nor is it valid
2509 when the mode has signed zeros, since multiplying a negative
2510 number by 0 will give -0, not 0. */
2511 if (!HONOR_NANS (mode
)
2512 && !HONOR_SIGNED_ZEROS (mode
)
2513 && trueop1
== CONST0_RTX (mode
)
2514 && ! side_effects_p (op0
))
      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
2519 if (!HONOR_SNANS (mode
)
2520 && trueop1
== CONST1_RTX (mode
))
2523 /* Convert multiply by constant power of two into shift. */
2524 if (CONST_SCALAR_INT_P (trueop1
))
2526 val
= wi::exact_log2 (std::make_pair (trueop1
, mode
));
2528 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
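      /* Example (constant assumed): wi::exact_log2 of (const_int 8) is 3,
	 so (mult x (const_int 8)) becomes (ashift x (const_int 3)); for a
	 constant that is not a power of two exact_log2 is negative and the
	 multiplication is left alone.  */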
2531 /* x*2 is x+x and x*(-1) is -x */
2532 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2533 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2534 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2535 && GET_MODE (op0
) == mode
)
2537 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
2539 if (real_equal (d1
, &dconst2
))
2540 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2542 if (!HONOR_SNANS (mode
)
2543 && real_equal (d1
, &dconstm1
))
2544 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2547 /* Optimize -x * -x as x * x. */
2548 if (FLOAT_MODE_P (mode
)
2549 && GET_CODE (op0
) == NEG
2550 && GET_CODE (op1
) == NEG
2551 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2552 && !side_effects_p (XEXP (op0
, 0)))
2553 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2555 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2556 if (SCALAR_FLOAT_MODE_P (mode
)
2557 && GET_CODE (op0
) == ABS
2558 && GET_CODE (op1
) == ABS
2559 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2560 && !side_effects_p (XEXP (op0
, 0)))
2561 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2563 /* Reassociate multiplication, but for floating point MULTs
2564 only when the user specifies unsafe math optimizations. */
2565 if (! FLOAT_MODE_P (mode
)
2566 || flag_unsafe_math_optimizations
)
2568 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2575 if (trueop1
== CONST0_RTX (mode
))
2577 if (INTEGRAL_MODE_P (mode
)
2578 && trueop1
== CONSTM1_RTX (mode
)
2579 && !side_effects_p (op0
))
2581 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2583 /* A | (~A) -> -1 */
2584 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2585 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2586 && ! side_effects_p (op0
)
2587 && SCALAR_INT_MODE_P (mode
))
2590 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2591 if (CONST_INT_P (op1
)
2592 && HWI_COMPUTABLE_MODE_P (mode
)
2593 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2594 && !side_effects_p (op0
))
2597 /* Canonicalize (X & C1) | C2. */
2598 if (GET_CODE (op0
) == AND
2599 && CONST_INT_P (trueop1
)
2600 && CONST_INT_P (XEXP (op0
, 1)))
2602 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2603 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2604 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2606 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2608 && !side_effects_p (XEXP (op0
, 0)))
2611 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2612 if (((c1
|c2
) & mask
) == mask
)
2613 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2615 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2616 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2618 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2619 gen_int_mode (c1
& ~c2
, mode
));
2620 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
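	  /* Numeric illustration (made-up constants): for
	     (ior (and x 0xff) 0x0f), c1 = 0xff and c2 = 0x0f overlap, so c1
	     is shrunk to c1 & ~c2 = 0xf0 and the expression becomes
	     (ior (and x 0xf0) 0x0f); the bits forced on by C2 no longer need
	     to be preserved by the AND.  */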
2624 /* Convert (A & B) | A to A. */
2625 if (GET_CODE (op0
) == AND
2626 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2627 || rtx_equal_p (XEXP (op0
, 1), op1
))
2628 && ! side_effects_p (XEXP (op0
, 0))
2629 && ! side_effects_p (XEXP (op0
, 1)))
2632 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2633 mode size to (rotate A CX). */
2635 if (GET_CODE (op1
) == ASHIFT
2636 || GET_CODE (op1
) == SUBREG
)
2647 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2648 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2649 && CONST_INT_P (XEXP (opleft
, 1))
2650 && CONST_INT_P (XEXP (opright
, 1))
2651 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2652 == GET_MODE_PRECISION (mode
)))
2653 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
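      /* Illustration (SImode assumed, precision 32): since 8 + 24 == 32,
	 (ior (ashift x (const_int 8)) (lshiftrt x (const_int 24)))
	 matches the test above and is rewritten as
	 (rotate x (const_int 8)), i.e. the classic
	 "(x << 8) | (x >> 24)" rotate-left idiom.  */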
2655 /* Same, but for ashift that has been "simplified" to a wider mode
2656 by simplify_shift_const. */
2658 if (GET_CODE (opleft
) == SUBREG
2659 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2660 && GET_CODE (opright
) == LSHIFTRT
2661 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2662 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2663 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2664 && (GET_MODE_SIZE (GET_MODE (opleft
))
2665 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2666 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2667 SUBREG_REG (XEXP (opright
, 0)))
2668 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2669 && CONST_INT_P (XEXP (opright
, 1))
2670 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2671 == GET_MODE_PRECISION (mode
)))
2672 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2673 XEXP (SUBREG_REG (opleft
), 1));
      /* If we have (ior (and X C1) C2), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
2677 if (CONST_INT_P (op1
)
2678 && (HWI_COMPUTABLE_MODE_P (mode
)
2679 || INTVAL (op1
) > 0)
2680 && GET_CODE (op0
) == AND
2681 && CONST_INT_P (XEXP (op0
, 1))
2682 && CONST_INT_P (op1
)
2683 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2685 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2686 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2689 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2692 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2693 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2694 the PLUS does not affect any of the bits in OP1: then we can do
2695 the IOR as a PLUS and we can associate. This is valid if OP1
2696 can be safely shifted left C bits. */
2697 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2698 && GET_CODE (XEXP (op0
, 0)) == PLUS
2699 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2700 && CONST_INT_P (XEXP (op0
, 1))
2701 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2703 int count
= INTVAL (XEXP (op0
, 1));
2704 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2706 if (mask
>> count
== INTVAL (trueop1
)
2707 && trunc_int_for_mode (mask
, mode
) == mask
2708 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2709 return simplify_gen_binary (ASHIFTRT
, mode
,
2710 plus_constant (mode
, XEXP (op0
, 0),
2715 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2719 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2725 if (trueop1
== CONST0_RTX (mode
))
2727 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2728 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2729 if (rtx_equal_p (trueop0
, trueop1
)
2730 && ! side_effects_p (op0
)
2731 && GET_MODE_CLASS (mode
) != MODE_CC
)
2732 return CONST0_RTX (mode
);
2734 /* Canonicalize XOR of the most significant bit to PLUS. */
2735 if (CONST_SCALAR_INT_P (op1
)
2736 && mode_signbit_p (mode
, op1
))
2737 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2738 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2739 if (CONST_SCALAR_INT_P (op1
)
2740 && GET_CODE (op0
) == PLUS
2741 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2742 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2743 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2744 simplify_gen_binary (XOR
, mode
, op1
,
2747 /* If we are XORing two things that have no bits in common,
2748 convert them into an IOR. This helps to detect rotation encoded
2749 using those methods and possibly other simplifications. */
2751 if (HWI_COMPUTABLE_MODE_P (mode
)
2752 && (nonzero_bits (op0
, mode
)
2753 & nonzero_bits (op1
, mode
)) == 0)
2754 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
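      /* Why the rewrite above is safe (illustrative): XOR and IOR agree
	 wherever at most one operand can have a nonzero bit.  If, say,
	 nonzero_bits says op0 fits in 0x00ff and op1 in 0xff00, then
	 op0 ^ op1 == op0 | op1 for all values, and IOR is the form the
	 rotate-recognition patterns above look for.  */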
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
2760 int num_negated
= 0;
2762 if (GET_CODE (op0
) == NOT
)
2763 num_negated
++, op0
= XEXP (op0
, 0);
2764 if (GET_CODE (op1
) == NOT
)
2765 num_negated
++, op1
= XEXP (op1
, 0);
2767 if (num_negated
== 2)
2768 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2769 else if (num_negated
== 1)
2770 return simplify_gen_unary (NOT
, mode
,
2771 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2775 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2776 correspond to a machine insn or result in further simplifications
2777 if B is a constant. */
2779 if (GET_CODE (op0
) == AND
2780 && rtx_equal_p (XEXP (op0
, 1), op1
)
2781 && ! side_effects_p (op1
))
2782 return simplify_gen_binary (AND
, mode
,
2783 simplify_gen_unary (NOT
, mode
,
2784 XEXP (op0
, 0), mode
),
2787 else if (GET_CODE (op0
) == AND
2788 && rtx_equal_p (XEXP (op0
, 0), op1
)
2789 && ! side_effects_p (op1
))
2790 return simplify_gen_binary (AND
, mode
,
2791 simplify_gen_unary (NOT
, mode
,
2792 XEXP (op0
, 1), mode
),
2795 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2796 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2797 out bits inverted twice and not set by C. Similarly, given
2798 (xor (and (xor A B) C) D), simplify without inverting C in
2799 the xor operand: (xor (and A C) (B&C)^D).
2801 else if ((GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == AND
)
2802 && GET_CODE (XEXP (op0
, 0)) == XOR
2803 && CONST_INT_P (op1
)
2804 && CONST_INT_P (XEXP (op0
, 1))
2805 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1)))
2807 enum rtx_code op
= GET_CODE (op0
);
2808 rtx a
= XEXP (XEXP (op0
, 0), 0);
2809 rtx b
= XEXP (XEXP (op0
, 0), 1);
2810 rtx c
= XEXP (op0
, 1);
2812 HOST_WIDE_INT bval
= INTVAL (b
);
2813 HOST_WIDE_INT cval
= INTVAL (c
);
2814 HOST_WIDE_INT dval
= INTVAL (d
);
2815 HOST_WIDE_INT xcval
;
2822 return simplify_gen_binary (XOR
, mode
,
2823 simplify_gen_binary (op
, mode
, a
, c
),
2824 gen_int_mode ((bval
& xcval
) ^ dval
,
2828 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2829 we can transform like this:
2830 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2831 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2832 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2833 Attempt a few simplifications when B and C are both constants. */
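      /* A concrete check of the identity above (values chosen purely for
	 illustration): with A = 0b1100, B = 0b1010, C = 0b0110:
	   (A & B) ^ C            = 0b1000 ^ 0b0110 = 0b1110
	   ~A&C | ~B&C | A&(~C&B) = 0b0010 | 0b0100 | 0b1000 = 0b1110.  */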
2834 if (GET_CODE (op0
) == AND
2835 && CONST_INT_P (op1
)
2836 && CONST_INT_P (XEXP (op0
, 1)))
2838 rtx a
= XEXP (op0
, 0);
2839 rtx b
= XEXP (op0
, 1);
2841 HOST_WIDE_INT bval
= INTVAL (b
);
2842 HOST_WIDE_INT cval
= INTVAL (c
);
2844 /* Instead of computing ~A&C, we compute its negated value,
2845 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2846 optimize for sure. If it does not simplify, we still try
2847 to compute ~A&C below, but since that always allocates
2848 RTL, we don't try that before committing to returning a
2849 simplified expression. */
2850 rtx n_na_c
= simplify_binary_operation (IOR
, mode
, a
,
2853 if ((~cval
& bval
) == 0)
2855 rtx na_c
= NULL_RTX
;
2857 na_c
= simplify_gen_unary (NOT
, mode
, n_na_c
, mode
);
2860 /* If ~A does not simplify, don't bother: we don't
2861 want to simplify 2 operations into 3, and if na_c
2862 were to simplify with na, n_na_c would have
2863 simplified as well. */
2864 rtx na
= simplify_unary_operation (NOT
, mode
, a
, mode
);
2866 na_c
= simplify_gen_binary (AND
, mode
, na
, c
);
2869 /* Try to simplify ~A&C | ~B&C. */
2870 if (na_c
!= NULL_RTX
)
2871 return simplify_gen_binary (IOR
, mode
, na_c
,
2872 gen_int_mode (~bval
& cval
, mode
));
2876 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2877 if (n_na_c
== CONSTM1_RTX (mode
))
2879 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2880 gen_int_mode (~cval
& bval
,
2882 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2883 gen_int_mode (~bval
& cval
,
2889 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2890 comparison if STORE_FLAG_VALUE is 1. */
2891 if (STORE_FLAG_VALUE
== 1
2892 && trueop1
== const1_rtx
2893 && COMPARISON_P (op0
)
2894 && (reversed
= reversed_comparison (op0
, mode
)))
2897 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2898 is (lt foo (const_int 0)), so we can perform the above
2899 simplification if STORE_FLAG_VALUE is 1. */
2901 if (STORE_FLAG_VALUE
== 1
2902 && trueop1
== const1_rtx
2903 && GET_CODE (op0
) == LSHIFTRT
2904 && CONST_INT_P (XEXP (op0
, 1))
2905 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2906 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2908 /* (xor (comparison foo bar) (const_int sign-bit))
2909 when STORE_FLAG_VALUE is the sign bit. */
2910 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2911 && trueop1
== const_true_rtx
2912 && COMPARISON_P (op0
)
2913 && (reversed
= reversed_comparison (op0
, mode
)))
2916 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2920 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2926 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2928 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2930 if (HWI_COMPUTABLE_MODE_P (mode
))
2932 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2933 HOST_WIDE_INT nzop1
;
2934 if (CONST_INT_P (trueop1
))
2936 HOST_WIDE_INT val1
= INTVAL (trueop1
);
	  /* If we are turning off bits already known off in OP0, we need
	     not do an AND.  */
2939 if ((nzop0
& ~val1
) == 0)
2942 nzop1
= nonzero_bits (trueop1
, mode
);
2943 /* If we are clearing all the nonzero bits, the result is zero. */
2944 if ((nzop1
& nzop0
) == 0
2945 && !side_effects_p (op0
) && !side_effects_p (op1
))
2946 return CONST0_RTX (mode
);
2948 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2949 && GET_MODE_CLASS (mode
) != MODE_CC
)
2952 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2953 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2954 && ! side_effects_p (op0
)
2955 && GET_MODE_CLASS (mode
) != MODE_CC
)
2956 return CONST0_RTX (mode
);
2958 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2959 there are no nonzero bits of C outside of X's mode. */
2960 if ((GET_CODE (op0
) == SIGN_EXTEND
2961 || GET_CODE (op0
) == ZERO_EXTEND
)
2962 && CONST_INT_P (trueop1
)
2963 && HWI_COMPUTABLE_MODE_P (mode
)
2964 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2965 & UINTVAL (trueop1
)) == 0)
2967 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2968 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2969 gen_int_mode (INTVAL (trueop1
),
2971 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2974 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2975 we might be able to further simplify the AND with X and potentially
2976 remove the truncation altogether. */
2977 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2979 rtx x
= XEXP (op0
, 0);
2980 machine_mode xmode
= GET_MODE (x
);
2981 tem
= simplify_gen_binary (AND
, xmode
, x
,
2982 gen_int_mode (INTVAL (trueop1
), xmode
));
2983 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2986 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2987 if (GET_CODE (op0
) == IOR
2988 && CONST_INT_P (trueop1
)
2989 && CONST_INT_P (XEXP (op0
, 1)))
2991 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2992 return simplify_gen_binary (IOR
, mode
,
2993 simplify_gen_binary (AND
, mode
,
2994 XEXP (op0
, 0), op1
),
2995 gen_int_mode (tmp
, mode
));
2998 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2999 insn (and may simplify more). */
3000 if (GET_CODE (op0
) == XOR
3001 && rtx_equal_p (XEXP (op0
, 0), op1
)
3002 && ! side_effects_p (op1
))
3003 return simplify_gen_binary (AND
, mode
,
3004 simplify_gen_unary (NOT
, mode
,
3005 XEXP (op0
, 1), mode
),
3008 if (GET_CODE (op0
) == XOR
3009 && rtx_equal_p (XEXP (op0
, 1), op1
)
3010 && ! side_effects_p (op1
))
3011 return simplify_gen_binary (AND
, mode
,
3012 simplify_gen_unary (NOT
, mode
,
3013 XEXP (op0
, 0), mode
),
3016 /* Similarly for (~(A ^ B)) & A. */
3017 if (GET_CODE (op0
) == NOT
3018 && GET_CODE (XEXP (op0
, 0)) == XOR
3019 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3020 && ! side_effects_p (op1
))
3021 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3023 if (GET_CODE (op0
) == NOT
3024 && GET_CODE (XEXP (op0
, 0)) == XOR
3025 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3026 && ! side_effects_p (op1
))
3027 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3029 /* Convert (A | B) & A to A. */
3030 if (GET_CODE (op0
) == IOR
3031 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3032 || rtx_equal_p (XEXP (op0
, 1), op1
))
3033 && ! side_effects_p (XEXP (op0
, 0))
3034 && ! side_effects_p (XEXP (op0
, 1)))
3037 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3038 ((A & N) + B) & M -> (A + B) & M
3039 Similarly if (N & M) == 0,
3040 ((A | N) + B) & M -> (A + B) & M
3041 and for - instead of + and/or ^ instead of |.
3042 Also, if (N & M) == 0, then
3043 (A +- N) & M -> A & M. */
3044 if (CONST_INT_P (trueop1
)
3045 && HWI_COMPUTABLE_MODE_P (mode
)
3046 && ~UINTVAL (trueop1
)
3047 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3048 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3053 pmop
[0] = XEXP (op0
, 0);
3054 pmop
[1] = XEXP (op0
, 1);
3056 if (CONST_INT_P (pmop
[1])
3057 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3058 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3060 for (which
= 0; which
< 2; which
++)
3063 switch (GET_CODE (tem
))
3066 if (CONST_INT_P (XEXP (tem
, 1))
3067 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3068 == UINTVAL (trueop1
))
3069 pmop
[which
] = XEXP (tem
, 0);
3073 if (CONST_INT_P (XEXP (tem
, 1))
3074 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3075 pmop
[which
] = XEXP (tem
, 0);
3082 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3084 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3086 return simplify_gen_binary (code
, mode
, tem
, op1
);
      /* (and X (ior (not X) Y)) -> (and X Y) */
3091 if (GET_CODE (op1
) == IOR
3092 && GET_CODE (XEXP (op1
, 0)) == NOT
3093 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 0), 0)))
3094 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3096 /* (and (ior (not X) Y) X) -> (and X Y) */
3097 if (GET_CODE (op0
) == IOR
3098 && GET_CODE (XEXP (op0
, 0)) == NOT
3099 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 0), 0)))
3100 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
      /* (and X (ior Y (not X))) -> (and X Y) */
3103 if (GET_CODE (op1
) == IOR
3104 && GET_CODE (XEXP (op1
, 1)) == NOT
3105 && rtx_equal_p (op0
, XEXP (XEXP (op1
, 1), 0)))
3106 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 0));
3108 /* (and (ior Y (not X)) X) -> (and X Y) */
3109 if (GET_CODE (op0
) == IOR
3110 && GET_CODE (XEXP (op0
, 1)) == NOT
3111 && rtx_equal_p (op1
, XEXP (XEXP (op0
, 1), 0)))
3112 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 0));
3114 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3118 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3124 /* 0/x is 0 (or x&0 if x has side-effects). */
3125 if (trueop0
== CONST0_RTX (mode
))
3127 if (side_effects_p (op1
))
3128 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3132 if (trueop1
== CONST1_RTX (mode
))
3134 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3138 /* Convert divide by power of two into shift. */
3139 if (CONST_INT_P (trueop1
)
3140 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3141 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
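      /* Example (constant assumed): exact_log2 (16) == 4, so an unsigned
	 divide by (const_int 16) becomes (lshiftrt x (const_int 4)).  The
	 logical shift is only valid for unsigned division; a signed divide
	 by a power of two needs a rounding adjustment and is not rewritten
	 here.  */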
3145 /* Handle floating point and integers separately. */
3146 if (SCALAR_FLOAT_MODE_P (mode
))
3148 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3149 safe for modes with NaNs, since 0.0 / 0.0 will then be
3150 NaN rather than 0.0. Nor is it safe for modes with signed
3151 zeros, since dividing 0 by a negative number gives -0.0 */
3152 if (trueop0
== CONST0_RTX (mode
)
3153 && !HONOR_NANS (mode
)
3154 && !HONOR_SIGNED_ZEROS (mode
)
3155 && ! side_effects_p (op1
))
3158 if (trueop1
== CONST1_RTX (mode
)
3159 && !HONOR_SNANS (mode
))
3162 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3163 && trueop1
!= CONST0_RTX (mode
))
3165 const REAL_VALUE_TYPE
*d1
= CONST_DOUBLE_REAL_VALUE (trueop1
);
3168 if (real_equal (d1
, &dconstm1
)
3169 && !HONOR_SNANS (mode
))
3170 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3172 /* Change FP division by a constant into multiplication.
3173 Only do this with -freciprocal-math. */
3174 if (flag_reciprocal_math
3175 && !real_equal (d1
, &dconst0
))
3178 real_arithmetic (&d
, RDIV_EXPR
, &dconst1
, d1
);
3179 tem
= const_double_from_real_value (d
, mode
);
3180 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
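	      /* Example of the reciprocal transformation above (constants
		 assumed): with -freciprocal-math, x / 4.0 becomes x * 0.25.
		 For constants whose reciprocal is not exactly representable
		 (e.g. 3.0) the product can differ from the quotient in the
		 last bits, which is why this is gated on
		 flag_reciprocal_math.  */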
3184 else if (SCALAR_INT_MODE_P (mode
))
3186 /* 0/x is 0 (or x&0 if x has side-effects). */
3187 if (trueop0
== CONST0_RTX (mode
)
3188 && !cfun
->can_throw_non_call_exceptions
)
3190 if (side_effects_p (op1
))
3191 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3195 if (trueop1
== CONST1_RTX (mode
))
3197 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3202 if (trueop1
== constm1_rtx
)
3204 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3206 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3212 /* 0%x is 0 (or x&0 if x has side-effects). */
3213 if (trueop0
== CONST0_RTX (mode
))
3215 if (side_effects_p (op1
))
3216 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
      /* x%1 is 0 (or x&0 if x has side-effects).  */
3220 if (trueop1
== CONST1_RTX (mode
))
3222 if (side_effects_p (op0
))
3223 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3224 return CONST0_RTX (mode
);
3226 /* Implement modulus by power of two as AND. */
3227 if (CONST_INT_P (trueop1
)
3228 && exact_log2 (UINTVAL (trueop1
)) > 0)
3229 return simplify_gen_binary (AND
, mode
, op0
,
3230 gen_int_mode (INTVAL (op1
) - 1, mode
));
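      /* Illustration (made-up constant): exact_log2 (8) == 3 > 0, so an
	 unsigned modulus by (const_int 8) is rewritten as
	 (and x (const_int 7)); e.g. 29 % 8 == 5 and 29 & 7 == 5.  This is
	 only correct for the unsigned modulus handled here; for signed MOD
	 a negative dividend would give a different result (-3 % 8 == -3,
	 but -3 & 7 == 5).  */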
3234 /* 0%x is 0 (or x&0 if x has side-effects). */
3235 if (trueop0
== CONST0_RTX (mode
))
3237 if (side_effects_p (op1
))
3238 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3241 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3242 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3244 if (side_effects_p (op0
))
3245 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3246 return CONST0_RTX (mode
);
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
3256 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3257 if (CONST_INT_P (trueop1
)
3258 && IN_RANGE (INTVAL (trueop1
),
3259 GET_MODE_PRECISION (mode
) / 2 + (code
== ROTATE
),
3260 GET_MODE_PRECISION (mode
) - 1))
3261 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3262 mode
, op0
, GEN_INT (GET_MODE_PRECISION (mode
)
3263 - INTVAL (trueop1
)));
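      /* Illustration (SImode assumed, precision 32): (rotate x (const_int 24))
	 is canonicalized to (rotatert x (const_int 8)) because 24 lies in
	 the range [17, 31]; the two forms compute the same value, and
	 keeping the smaller count gives a single canonical representation.  */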
3267 if (trueop1
== CONST0_RTX (mode
))
3269 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3271 /* Rotating ~0 always results in ~0. */
3272 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3273 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3274 && ! side_effects_p (op1
))
      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
3288 if (code
== ASHIFTRT
3289 && !VECTOR_MODE_P (mode
)
3291 && CONST_INT_P (op1
)
3292 && GET_CODE (SUBREG_REG (op0
)) == LSHIFTRT
3293 && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0
)))
3294 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1))
3295 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3296 > GET_MODE_BITSIZE (mode
))
3297 && (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3298 == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0
)))
3299 - GET_MODE_BITSIZE (mode
)))
3300 && subreg_lowpart_p (op0
))
3302 rtx tmp
= GEN_INT (INTVAL (XEXP (SUBREG_REG (op0
), 1))
3304 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op0
));
3305 tmp
= simplify_gen_binary (ASHIFTRT
,
3306 GET_MODE (SUBREG_REG (op0
)),
3307 XEXP (SUBREG_REG (op0
), 0),
3309 return lowpart_subreg (mode
, tmp
, inner_mode
);
3312 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3314 val
= INTVAL (op1
) & (GET_MODE_PRECISION (mode
) - 1);
3315 if (val
!= INTVAL (op1
))
3316 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3323 if (trueop1
== CONST0_RTX (mode
))
3325 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3327 goto canonicalize_shift
;
3330 if (trueop1
== CONST0_RTX (mode
))
3332 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3334 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3335 if (GET_CODE (op0
) == CLZ
3336 && CONST_INT_P (trueop1
)
3337 && STORE_FLAG_VALUE
== 1
3338 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3340 machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3341 unsigned HOST_WIDE_INT zero_val
= 0;
3343 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3344 && zero_val
== GET_MODE_PRECISION (imode
)
3345 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3346 return simplify_gen_relational (EQ
, mode
, imode
,
3347 XEXP (op0
, 0), const0_rtx
);
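	  /* Worked case (parameters assumed): if CLZ_DEFINED_VALUE_AT_ZERO
	     yields 32 for a 32-bit inner mode, then (clz x) is 0..31 for
	     x != 0 and 32 only for x == 0, so (lshiftrt (clz x) (const_int 5))
	     is 1 exactly when x == 0, i.e. the STORE_FLAG_VALUE == 1 form of
	     (eq x 0).  */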
3349 goto canonicalize_shift
;
3352 if (width
<= HOST_BITS_PER_WIDE_INT
3353 && mode_signbit_p (mode
, trueop1
)
3354 && ! side_effects_p (op0
))
3356 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3358 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3364 if (width
<= HOST_BITS_PER_WIDE_INT
3365 && CONST_INT_P (trueop1
)
3366 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3367 && ! side_effects_p (op0
))
3369 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3371 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3377 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3379 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3381 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3387 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3389 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3391 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3404 /* ??? There are simplifications that can be done. */
3408 if (!VECTOR_MODE_P (mode
))
3410 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3411 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3412 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3413 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3414 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3416 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3417 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3420 /* Extract a scalar element from a nested VEC_SELECT expression
3421 (with optional nested VEC_CONCAT expression). Some targets
3422 (i386) extract scalar element from a vector using chain of
3423 nested VEC_SELECT expressions. When input operand is a memory
3424 operand, this operation can be simplified to a simple scalar
3425 load from an offseted memory address. */
3426 if (GET_CODE (trueop0
) == VEC_SELECT
)
3428 rtx op0
= XEXP (trueop0
, 0);
3429 rtx op1
= XEXP (trueop0
, 1);
3431 machine_mode opmode
= GET_MODE (op0
);
3432 int elt_size
= GET_MODE_UNIT_SIZE (opmode
);
3433 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3435 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3441 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3442 gcc_assert (i
< n_elts
);
3444 /* Select element, pointed by nested selector. */
3445 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3447 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3448 if (GET_CODE (op0
) == VEC_CONCAT
)
3450 rtx op00
= XEXP (op0
, 0);
3451 rtx op01
= XEXP (op0
, 1);
3453 machine_mode mode00
, mode01
;
3454 int n_elts00
, n_elts01
;
3456 mode00
= GET_MODE (op00
);
3457 mode01
= GET_MODE (op01
);
3459 /* Find out number of elements of each operand. */
3460 if (VECTOR_MODE_P (mode00
))
3462 elt_size
= GET_MODE_UNIT_SIZE (mode00
);
3463 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3468 if (VECTOR_MODE_P (mode01
))
3470 elt_size
= GET_MODE_UNIT_SIZE (mode01
);
3471 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3476 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3478 /* Select correct operand of VEC_CONCAT
3479 and adjust selector. */
3480 if (elem
< n_elts01
)
3491 vec
= rtvec_alloc (1);
3492 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3494 tmp
= gen_rtx_fmt_ee (code
, mode
,
3495 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3498 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3499 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3500 return XEXP (trueop0
, 0);
3504 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3505 gcc_assert (GET_MODE_INNER (mode
)
3506 == GET_MODE_INNER (GET_MODE (trueop0
)));
3507 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3509 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3511 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3512 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3513 rtvec v
= rtvec_alloc (n_elts
);
3516 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3517 for (i
= 0; i
< n_elts
; i
++)
3519 rtx x
= XVECEXP (trueop1
, 0, i
);
3521 gcc_assert (CONST_INT_P (x
));
3522 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3526 return gen_rtx_CONST_VECTOR (mode
, v
);
3529 /* Recognize the identity. */
3530 if (GET_MODE (trueop0
) == mode
)
3532 bool maybe_ident
= true;
3533 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3535 rtx j
= XVECEXP (trueop1
, 0, i
);
3536 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3538 maybe_ident
= false;
3546 /* If we build {a,b} then permute it, build the result directly. */
3547 if (XVECLEN (trueop1
, 0) == 2
3548 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3549 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3550 && GET_CODE (trueop0
) == VEC_CONCAT
3551 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3552 && GET_MODE (XEXP (trueop0
, 0)) == mode
3553 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3554 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3556 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3557 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3560 gcc_assert (i0
< 4 && i1
< 4);
3561 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3562 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3564 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3567 if (XVECLEN (trueop1
, 0) == 2
3568 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3569 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3570 && GET_CODE (trueop0
) == VEC_CONCAT
3571 && GET_MODE (trueop0
) == mode
)
3573 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3574 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3577 gcc_assert (i0
< 2 && i1
< 2);
3578 subop0
= XEXP (trueop0
, i0
);
3579 subop1
= XEXP (trueop0
, i1
);
3581 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3584 /* If we select one half of a vec_concat, return that. */
3585 if (GET_CODE (trueop0
) == VEC_CONCAT
3586 && CONST_INT_P (XVECEXP (trueop1
, 0, 0)))
3588 rtx subop0
= XEXP (trueop0
, 0);
3589 rtx subop1
= XEXP (trueop0
, 1);
3590 machine_mode mode0
= GET_MODE (subop0
);
3591 machine_mode mode1
= GET_MODE (subop1
);
3592 int li
= GET_MODE_UNIT_SIZE (mode0
);
3593 int l0
= GET_MODE_SIZE (mode0
) / li
;
3594 int l1
= GET_MODE_SIZE (mode1
) / li
;
3595 int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3596 if (i0
== 0 && !side_effects_p (op1
) && mode
== mode0
)
3598 bool success
= true;
3599 for (int i
= 1; i
< l0
; ++i
)
3601 rtx j
= XVECEXP (trueop1
, 0, i
);
3602 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3611 if (i0
== l0
&& !side_effects_p (op0
) && mode
== mode1
)
3613 bool success
= true;
3614 for (int i
= 1; i
< l1
; ++i
)
3616 rtx j
= XVECEXP (trueop1
, 0, i
);
3617 if (!CONST_INT_P (j
) || INTVAL (j
) != i0
+ i
)
3629 if (XVECLEN (trueop1
, 0) == 1
3630 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3631 && GET_CODE (trueop0
) == VEC_CONCAT
)
3634 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3636 /* Try to find the element in the VEC_CONCAT. */
3637 while (GET_MODE (vec
) != mode
3638 && GET_CODE (vec
) == VEC_CONCAT
)
3640 HOST_WIDE_INT vec_size
;
3642 if (CONST_INT_P (XEXP (vec
, 0)))
3644 /* vec_concat of two const_ints doesn't make sense with
3645 respect to modes. */
3646 if (CONST_INT_P (XEXP (vec
, 1)))
3649 vec_size
= GET_MODE_SIZE (GET_MODE (trueop0
))
3650 - GET_MODE_SIZE (GET_MODE (XEXP (vec
, 1)));
3653 vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3655 if (offset
< vec_size
)
3656 vec
= XEXP (vec
, 0);
3660 vec
= XEXP (vec
, 1);
3662 vec
= avoid_constant_pool_reference (vec
);
3665 if (GET_MODE (vec
) == mode
)
3669 /* If we select elements in a vec_merge that all come from the same
3670 operand, select from that operand directly. */
3671 if (GET_CODE (op0
) == VEC_MERGE
)
3673 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3674 if (CONST_INT_P (trueop02
))
3676 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3677 bool all_operand0
= true;
3678 bool all_operand1
= true;
3679 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3681 rtx j
= XVECEXP (trueop1
, 0, i
);
3682 if (sel
& (HOST_WIDE_INT_1U
<< UINTVAL (j
)))
3683 all_operand1
= false;
3685 all_operand0
= false;
3687 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3688 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3689 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3690 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3694 /* If we have two nested selects that are inverses of each
3695 other, replace them with the source operand. */
3696 if (GET_CODE (trueop0
) == VEC_SELECT
3697 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3699 rtx op0_subop1
= XEXP (trueop0
, 1);
3700 gcc_assert (GET_CODE (op0_subop1
) == PARALLEL
);
3701 gcc_assert (XVECLEN (trueop1
, 0) == GET_MODE_NUNITS (mode
));
3703 /* Apply the outer ordering vector to the inner one. (The inner
3704 ordering vector is expressly permitted to be of a different
3705 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3706 then the two VEC_SELECTs cancel. */
3707 for (int i
= 0; i
< XVECLEN (trueop1
, 0); ++i
)
3709 rtx x
= XVECEXP (trueop1
, 0, i
);
3710 if (!CONST_INT_P (x
))
3712 rtx y
= XVECEXP (op0_subop1
, 0, INTVAL (x
));
3713 if (!CONST_INT_P (y
) || i
!= INTVAL (y
))
3716 return XEXP (trueop0
, 0);
3722 machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3723 ? GET_MODE (trueop0
)
3724 : GET_MODE_INNER (mode
));
3725 machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3726 ? GET_MODE (trueop1
)
3727 : GET_MODE_INNER (mode
));
3729 gcc_assert (VECTOR_MODE_P (mode
));
3730 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3731 == GET_MODE_SIZE (mode
));
3733 if (VECTOR_MODE_P (op0_mode
))
3734 gcc_assert (GET_MODE_INNER (mode
)
3735 == GET_MODE_INNER (op0_mode
));
3737 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3739 if (VECTOR_MODE_P (op1_mode
))
3740 gcc_assert (GET_MODE_INNER (mode
)
3741 == GET_MODE_INNER (op1_mode
));
3743 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3745 if ((GET_CODE (trueop0
) == CONST_VECTOR
3746 || CONST_SCALAR_INT_P (trueop0
)
3747 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3748 && (GET_CODE (trueop1
) == CONST_VECTOR
3749 || CONST_SCALAR_INT_P (trueop1
)
3750 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3752 int elt_size
= GET_MODE_UNIT_SIZE (mode
);
3753 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3754 rtvec v
= rtvec_alloc (n_elts
);
3756 unsigned in_n_elts
= 1;
3758 if (VECTOR_MODE_P (op0_mode
))
3759 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3760 for (i
= 0; i
< n_elts
; i
++)
3764 if (!VECTOR_MODE_P (op0_mode
))
3765 RTVEC_ELT (v
, i
) = trueop0
;
3767 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3771 if (!VECTOR_MODE_P (op1_mode
))
3772 RTVEC_ELT (v
, i
) = trueop1
;
3774 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3779 return gen_rtx_CONST_VECTOR (mode
, v
);
3782 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3783 Restrict the transformation to avoid generating a VEC_SELECT with a
3784 mode unrelated to its operand. */
3785 if (GET_CODE (trueop0
) == VEC_SELECT
3786 && GET_CODE (trueop1
) == VEC_SELECT
3787 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3788 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3790 rtx par0
= XEXP (trueop0
, 1);
3791 rtx par1
= XEXP (trueop1
, 1);
3792 int len0
= XVECLEN (par0
, 0);
3793 int len1
= XVECLEN (par1
, 0);
3794 rtvec vec
= rtvec_alloc (len0
+ len1
);
3795 for (int i
= 0; i
< len0
; i
++)
3796 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3797 for (int i
= 0; i
< len1
; i
++)
3798 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3799 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3800 gen_rtx_PARALLEL (VOIDmode
, vec
));
3813 simplify_const_binary_operation (enum rtx_code code
, machine_mode mode
,
3816 unsigned int width
= GET_MODE_PRECISION (mode
);
3818 if (VECTOR_MODE_P (mode
)
3819 && code
!= VEC_CONCAT
3820 && GET_CODE (op0
) == CONST_VECTOR
3821 && GET_CODE (op1
) == CONST_VECTOR
)
3823 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3824 machine_mode op0mode
= GET_MODE (op0
);
3825 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3826 machine_mode op1mode
= GET_MODE (op1
);
3827 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3828 rtvec v
= rtvec_alloc (n_elts
);
3831 gcc_assert (op0_n_elts
== n_elts
);
3832 gcc_assert (op1_n_elts
== n_elts
);
3833 for (i
= 0; i
< n_elts
; i
++)
3835 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3836 CONST_VECTOR_ELT (op0
, i
),
3837 CONST_VECTOR_ELT (op1
, i
));
3840 RTVEC_ELT (v
, i
) = x
;
3843 return gen_rtx_CONST_VECTOR (mode
, v
);
3846 if (VECTOR_MODE_P (mode
)
3847 && code
== VEC_CONCAT
3848 && (CONST_SCALAR_INT_P (op0
)
3849 || GET_CODE (op0
) == CONST_FIXED
3850 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3851 && (CONST_SCALAR_INT_P (op1
)
3852 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3853 || GET_CODE (op1
) == CONST_FIXED
))
3855 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3856 rtvec v
= rtvec_alloc (n_elts
);
3858 gcc_assert (n_elts
>= 2);
3861 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3862 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3864 RTVEC_ELT (v
, 0) = op0
;
3865 RTVEC_ELT (v
, 1) = op1
;
3869 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3870 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3873 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3874 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3875 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3877 for (i
= 0; i
< op0_n_elts
; ++i
)
3878 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3879 for (i
= 0; i
< op1_n_elts
; ++i
)
3880 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3883 return gen_rtx_CONST_VECTOR (mode
, v
);
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    switch (code)
	      {
	      case AND:
		tmp0[i] &= tmp1[i];
		break;
	      case IOR:
		tmp0[i] |= tmp1[i];
		break;
	      case XOR:
		tmp0[i] ^= tmp1[i];
		break;
	      default:
		gcc_unreachable ();
	      }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  const REAL_VALUE_TYPE *opr0, *opr1;
	  bool inexact;

	  opr0 = CONST_DOUBLE_REAL_VALUE (op0);
	  opr1 = CONST_DOUBLE_REAL_VALUE (op1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
		  || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
	    return 0;

	  real_convert (&f0, mode, opr0);
	  real_convert (&f1, mode, opr1);

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may dependent upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return const_double_from_real_value (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE but a lot of
	 upstream callers expect that this function never fails to
	 simplify something and so you if you added this to the test
	 above the code would die later anyway.  If this assert
	 happens, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;
	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;
	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;
	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;
	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;
	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;
	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;
	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;
	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;
	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;
	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;
	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;
	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;
	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;
	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;
	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;
	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;
	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;
	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;
	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
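/* Illustrative note (not part of the original sources): with QImode
   operands (WIDTH == 8) on a SHIFT_COUNT_TRUNCATED target, folding
   (ashift (const_int 1) (const_int 11)) reduces the count to 11 % 8 == 3
   and yields (const_int 8); without truncated shift counts the
   out-of-range count makes the folder give up and return NULL_RTX.  */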
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j--
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* If nothing changed, check that rematerialization of rtl instructions
     is still required.  */
  if (!canonicalized)
    {
      /* Perform rematerialization if only all operands are registers and
	 all operations are PLUS.  */
      /* ??? Also disallow (non-global, non-frame) fixed registers to work
	 around rs6000 and how it uses the CA register.  See PR67145.  */
      for (i = 0; i < n_ops; i++)
	if (ops[i].neg
	    || !REG_P (ops[i].op)
	    || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
		&& fixed_regs[REGNO (ops[i].op)]
		&& !global_regs[REGNO (ops[i].op)]
		&& ops[i].op != frame_pointer_rtx
		&& ops[i].op != arg_pointer_rtx
		&& ops[i].op != stack_pointer_rtx))
	  return NULL_RTX;
      goto gen_result;
    }

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);

  /* Now make the result by performing the requested operations.  */
 gen_result:
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
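/* Illustrative sketch (not part of the original sources): given
       (minus (plus (reg A) (const_int 4)) (plus (reg A) (const_int 1)))
   simplify_plus_minus expands the operand list to {A, +4, -A, -1}; the
   pairwise combination loop then cancels A against -A and folds the
   constants, so the whole expression collapses to (const_int 3).  */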
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result. If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (cmp_mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (cmp_mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
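/* Worked example (illustrative, not part of the original sources): with
   STORE_FLAG_VALUE == 1, the test
       (ne:SI (zero_extract:SI (reg:SI x) (const_int 1) (const_int 3))
	      (const_int 0))
   satisfies nonzero_bits (op0, SImode) == 1, so the simplification above
   returns the zero_extract itself (here via lowpart_subreg, since the
   result mode equals CMP_MODE).  */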
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
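/* Worked example (illustrative, not part of the original sources):
   comparing (const_int -1) with (const_int 1) gives
   KNOWN_RESULTS == CMP_LT | CMP_GTU, so comparison_result returns
   const_true_rtx for LT and GEU but const0_rtx for GE and LEU.  */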
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOID_mode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
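/* Worked example (illustrative, not part of the original sources): if
   TRUEOP0 is known to be a zero-extended QImode value compared in SImode
   (nonzero_bits == 0xff), the reduced range above is [0, 255], so
   (gtu x (const_int 255)) folds to const0_rtx and
   (leu x (const_int 255)) folds to const_true_rtx.  */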
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
	     with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
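/* Worked example (illustrative, not part of the original sources): in
   V4SImode, (vec_merge a b (const_int 5)) takes elements 0 and 2 from A
   and elements 1 and 3 from B, since bit I of the selector chooses element
   I of the first operand.  A selector of 0 therefore yields B and a
   selector of 0xf yields A, which is what the (sel & mask) tests above
   implement.  */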
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
	      / HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
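/* Worked example (illustrative, not part of the original sources): on a
   little-endian target, (subreg:QI (const_int 0x1234) 1) taken from SImode
   unpacks the value into the byte array 34 12 00 00, selects byte 1 and
   repacks it, giving (const_int 0x12).  On a big-endian target the BYTE
   renumbering above first converts the memory-order offset into a
   least-significant-first index.  */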
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grog partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits that the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
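/* Usage sketch (illustrative, not part of the original sources): on a
   little-endian target lowpart_subreg (QImode, x, SImode) is equivalent to
   simplify_gen_subreg (QImode, x, SImode, 0); on a big-endian target the
   lowpart offset is 3, so the same call produces (subreg:QI x 3) unless it
   can be simplified further.  */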
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pour over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification.  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}