/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
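
/* Illustrative usage sketch (added for exposition; not part of the
   original source):

     unsigned HOST_WIDE_INT lo = (unsigned HOST_WIDE_INT) -5;
     HOST_WIDE_INT hi = HWI_SIGN_EXTEND (lo);

   HI becomes -1 because LO has its sign bit set; for a nonnegative LO it
   would be 0.  This is how a single low word is widened to a (low, high)
   pair.  */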
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
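
/* Illustrative sketch (added for exposition; not part of the original
   source): for QImode the precision is 8, so the sign bit the predicates
   above test is bit 7 (0x80):

     val_signbit_p (QImode, 0x80)              -> true
     val_signbit_known_set_p (QImode, 0x90)    -> true  (bit 7 is set)
     val_signbit_known_clear_p (QImode, 0x7f)  -> true  (bit 7 is clear)  */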

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
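
/* Usage sketch (added for exposition; not part of the original source):
   for a commutative code the constant ends up second, so a call such as

     simplify_gen_binary (PLUS, SImode, const1_rtx, x)

   where X is some non-constant SImode rtx yields (plus:SI x (const_int 1))
   unless the whole expression folds first.  */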

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
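
/* Illustrative sketch (added for exposition; not part of the original
   source): given a constant pool reference such as

     (mem/u/c:SF (symbol_ref/u:SI ("*.LC0")))

   where the pool entry .LC0 holds an SFmode constant,
   avoid_constant_pool_reference returns that CONST_DOUBLE directly, so
   later folding sees the value instead of a memory load.  The label name
   ".LC0" is only an example.  */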

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
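
/* Usage sketch (added for exposition; not part of the original source):
   each of the simplify_gen_* helpers above first tries to fold and only
   then builds a fresh rtx, e.g.

     simplify_gen_unary (NEG, SImode, GEN_INT (5), SImode)

   returns (const_int -5) rather than building (neg:SI (const_int 5)).  */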
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
414 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
415 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
417 enum rtx_code code
= GET_CODE (x
);
418 enum machine_mode mode
= GET_MODE (x
);
419 enum machine_mode op_mode
;
421 rtx op0
, op1
, op2
, newx
, op
;
425 if (__builtin_expect (fn
!= NULL
, 0))
427 newx
= fn (x
, old_rtx
, data
);
431 else if (rtx_equal_p (x
, old_rtx
))
432 return copy_rtx ((rtx
) data
);
434 switch (GET_RTX_CLASS (code
))
438 op_mode
= GET_MODE (op0
);
439 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
440 if (op0
== XEXP (x
, 0))
442 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
446 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
447 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
448 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
450 return simplify_gen_binary (code
, mode
, op0
, op1
);
453 case RTX_COMM_COMPARE
:
456 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
457 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
458 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
459 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
461 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
464 case RTX_BITFIELD_OPS
:
466 op_mode
= GET_MODE (op0
);
467 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
468 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
469 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
470 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
472 if (op_mode
== VOIDmode
)
473 op_mode
= GET_MODE (op0
);
474 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
479 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
480 if (op0
== SUBREG_REG (x
))
482 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
483 GET_MODE (SUBREG_REG (x
)),
485 return op0
? op0
: x
;
492 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
493 if (op0
== XEXP (x
, 0))
495 return replace_equiv_address_nv (x
, op0
);
497 else if (code
== LO_SUM
)
499 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
500 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
506 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
508 return gen_rtx_LO_SUM (mode
, op0
, op1
);
517 fmt
= GET_RTX_FORMAT (code
);
518 for (i
= 0; fmt
[i
]; i
++)
523 newvec
= XVEC (newx
, i
);
524 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
526 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
528 if (op
!= RTVEC_ELT (vec
, j
))
532 newvec
= shallow_copy_rtvec (vec
);
534 newx
= shallow_copy_rtx (x
);
535 XVEC (newx
, i
) = newvec
;
537 RTVEC_ELT (newvec
, j
) = op
;
545 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
546 if (op
!= XEXP (x
, i
))
549 newx
= shallow_copy_rtx (x
);
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
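
/* Illustrative sketch (added for exposition; not part of the original
   source): on a little-endian 64-bit target where
   TRULY_NOOP_TRUNCATION_MODES_P (SImode, DImode) holds, the two rvalue
   truncations of a DImode value R described above are

     (subreg:SI (reg:DI R) 0)      -- lowpart subreg form
     (truncate:SI (reg:DI R))      -- TRUNCATE form

   and calling simplify_gen_unary (TRUNCATE, SImode, R, DImode) lets
   simplify_unary_operation pick whichever representation is valid.  */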
615 simplify_truncation (enum machine_mode mode
, rtx op
,
616 enum machine_mode op_mode
)
618 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
619 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
620 gcc_assert (precision
<= op_precision
);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op
) == ZERO_EXTEND
624 || GET_CODE (op
) == SIGN_EXTEND
)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
632 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
633 if (mode
== origmode
)
635 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
636 return simplify_gen_unary (TRUNCATE
, mode
,
637 XEXP (op
, 0), origmode
);
639 return simplify_gen_unary (GET_CODE (op
), mode
,
640 XEXP (op
, 0), origmode
);
643 /* If the machine can perform operations in the truncated mode, distribute
644 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
645 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
647 #ifdef WORD_REGISTER_OPERATIONS
648 && precision
>= BITS_PER_WORD
650 && (GET_CODE (op
) == PLUS
651 || GET_CODE (op
) == MINUS
652 || GET_CODE (op
) == MULT
))
654 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
657 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
659 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
663 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
664 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
665 the outer subreg is effectively a truncation to the original mode. */
666 if ((GET_CODE (op
) == LSHIFTRT
667 || GET_CODE (op
) == ASHIFTRT
)
668 /* Ensure that OP_MODE is at least twice as wide as MODE
669 to avoid the possibility that an outer LSHIFTRT shifts by more
670 than the sign extension's sign_bit_copies and introduces zeros
671 into the high bits of the result. */
672 && 2 * precision
<= op_precision
673 && CONST_INT_P (XEXP (op
, 1))
674 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
675 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
676 && UINTVAL (XEXP (op
, 1)) < precision
)
677 return simplify_gen_binary (ASHIFTRT
, mode
,
678 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
680 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
681 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
682 the outer subreg is effectively a truncation to the original mode. */
683 if ((GET_CODE (op
) == LSHIFTRT
684 || GET_CODE (op
) == ASHIFTRT
)
685 && CONST_INT_P (XEXP (op
, 1))
686 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
687 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
688 && UINTVAL (XEXP (op
, 1)) < precision
)
689 return simplify_gen_binary (LSHIFTRT
, mode
,
690 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
692 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
693 to (ashift:QI (x:QI) C), where C is a suitable small constant and
694 the outer subreg is effectively a truncation to the original mode. */
695 if (GET_CODE (op
) == ASHIFT
696 && CONST_INT_P (XEXP (op
, 1))
697 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
698 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
699 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
700 && UINTVAL (XEXP (op
, 1)) < precision
)
701 return simplify_gen_binary (ASHIFT
, mode
,
702 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
704 /* Recognize a word extraction from a multi-word subreg. */
705 if ((GET_CODE (op
) == LSHIFTRT
706 || GET_CODE (op
) == ASHIFTRT
)
707 && SCALAR_INT_MODE_P (mode
)
708 && SCALAR_INT_MODE_P (op_mode
)
709 && precision
>= BITS_PER_WORD
710 && 2 * precision
<= op_precision
711 && CONST_INT_P (XEXP (op
, 1))
712 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
713 && UINTVAL (XEXP (op
, 1)) < op_precision
)
715 int byte
= subreg_lowpart_offset (mode
, op_mode
);
716 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
717 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
719 ? byte
- shifted_bytes
720 : byte
+ shifted_bytes
));
723 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
724 and try replacing the TRUNCATE and shift with it. Don't do this
725 if the MEM has a mode-dependent address. */
726 if ((GET_CODE (op
) == LSHIFTRT
727 || GET_CODE (op
) == ASHIFTRT
)
728 && SCALAR_INT_MODE_P (op_mode
)
729 && MEM_P (XEXP (op
, 0))
730 && CONST_INT_P (XEXP (op
, 1))
731 && (INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (mode
)) == 0
732 && INTVAL (XEXP (op
, 1)) > 0
733 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (op_mode
)
734 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
735 MEM_ADDR_SPACE (XEXP (op
, 0)))
736 && ! MEM_VOLATILE_P (XEXP (op
, 0))
737 && (GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
738 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
740 int byte
= subreg_lowpart_offset (mode
, op_mode
);
741 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
742 return adjust_address_nv (XEXP (op
, 0), mode
,
744 ? byte
- shifted_bytes
745 : byte
+ shifted_bytes
));
748 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
749 (OP:SI foo:SI) if OP is NEG or ABS. */
750 if ((GET_CODE (op
) == ABS
751 || GET_CODE (op
) == NEG
)
752 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
753 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
754 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
755 return simplify_gen_unary (GET_CODE (op
), mode
,
756 XEXP (XEXP (op
, 0), 0), mode
);
758 /* (truncate:A (subreg:B (truncate:C X) 0)) is
760 if (GET_CODE (op
) == SUBREG
761 && SCALAR_INT_MODE_P (mode
)
762 && SCALAR_INT_MODE_P (op_mode
)
763 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op
)))
764 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
765 && subreg_lowpart_p (op
))
767 rtx inner
= XEXP (SUBREG_REG (op
), 0);
768 if (GET_MODE_PRECISION (mode
)
769 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
770 return simplify_gen_unary (TRUNCATE
, mode
, inner
, GET_MODE (inner
));
772 /* If subreg above is paradoxical and C is narrower
773 than A, return (subreg:A (truncate:C X) 0). */
774 return simplify_gen_subreg (mode
, SUBREG_REG (op
),
775 GET_MODE (SUBREG_REG (op
)), 0);
778 /* (truncate:A (truncate:B X)) is (truncate:A X). */
779 if (GET_CODE (op
) == TRUNCATE
)
780 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
781 GET_MODE (XEXP (op
, 0)));
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
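
/* Usage sketch (added for exposition; not part of the original source):

     simplify_unary_operation (ZERO_EXTEND, SImode, GEN_INT (255), QImode)

   folds to (const_int 255) via simplify_const_unary_operation, while a
   non-constant operand falls through to simplify_unary_operation_1 and
   returns 0 unless one of the patterns there matches.  */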
804 /* Perform some simplifications we can do even if the operands
807 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
809 enum rtx_code reversed
;
815 /* (not (not X)) == X. */
816 if (GET_CODE (op
) == NOT
)
819 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
820 comparison is all ones. */
821 if (COMPARISON_P (op
)
822 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
823 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
824 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
825 XEXP (op
, 0), XEXP (op
, 1));
827 /* (not (plus X -1)) can become (neg X). */
828 if (GET_CODE (op
) == PLUS
829 && XEXP (op
, 1) == constm1_rtx
)
830 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
832 /* Similarly, (not (neg X)) is (plus X -1). */
833 if (GET_CODE (op
) == NEG
)
834 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
837 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
838 if (GET_CODE (op
) == XOR
839 && CONST_INT_P (XEXP (op
, 1))
840 && (temp
= simplify_unary_operation (NOT
, mode
,
841 XEXP (op
, 1), mode
)) != 0)
842 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
844 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
845 if (GET_CODE (op
) == PLUS
846 && CONST_INT_P (XEXP (op
, 1))
847 && mode_signbit_p (mode
, XEXP (op
, 1))
848 && (temp
= simplify_unary_operation (NOT
, mode
,
849 XEXP (op
, 1), mode
)) != 0)
850 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
853 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
854 operands other than 1, but that is not valid. We could do a
855 similar simplification for (not (lshiftrt C X)) where C is
856 just the sign bit, but this doesn't seem common enough to
858 if (GET_CODE (op
) == ASHIFT
859 && XEXP (op
, 0) == const1_rtx
)
861 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
862 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
865 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
866 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
867 so we can perform the above simplification. */
868 if (STORE_FLAG_VALUE
== -1
869 && GET_CODE (op
) == ASHIFTRT
870 && GET_CODE (XEXP (op
, 1))
871 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
872 return simplify_gen_relational (GE
, mode
, VOIDmode
,
873 XEXP (op
, 0), const0_rtx
);
876 if (GET_CODE (op
) == SUBREG
877 && subreg_lowpart_p (op
)
878 && (GET_MODE_SIZE (GET_MODE (op
))
879 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
880 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
881 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
883 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
886 x
= gen_rtx_ROTATE (inner_mode
,
887 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
889 XEXP (SUBREG_REG (op
), 1));
890 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
895 /* Apply De Morgan's laws to reduce number of patterns for machines
896 with negating logical insns (and-not, nand, etc.). If result has
897 only one NOT, put it first, since that is how the patterns are
899 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
901 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
902 enum machine_mode op_mode
;
904 op_mode
= GET_MODE (in1
);
905 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
907 op_mode
= GET_MODE (in2
);
908 if (op_mode
== VOIDmode
)
910 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
912 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
915 in2
= in1
; in1
= tem
;
918 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
922 /* (not (bswap x)) -> (bswap (not x)). */
923 if (GET_CODE (op
) == BSWAP
)
925 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
926 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
931 /* (neg (neg X)) == X. */
932 if (GET_CODE (op
) == NEG
)
935 /* (neg (plus X 1)) can become (not X). */
936 if (GET_CODE (op
) == PLUS
937 && XEXP (op
, 1) == const1_rtx
)
938 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
940 /* Similarly, (neg (not X)) is (plus X 1). */
941 if (GET_CODE (op
) == NOT
)
942 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
945 /* (neg (minus X Y)) can become (minus Y X). This transformation
946 isn't safe for modes with signed zeros, since if X and Y are
947 both +0, (minus Y X) is the same as (minus X Y). If the
948 rounding mode is towards +infinity (or -infinity) then the two
949 expressions will be rounded differently. */
950 if (GET_CODE (op
) == MINUS
951 && !HONOR_SIGNED_ZEROS (mode
)
952 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
953 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
955 if (GET_CODE (op
) == PLUS
956 && !HONOR_SIGNED_ZEROS (mode
)
957 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
959 /* (neg (plus A C)) is simplified to (minus -C A). */
960 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
961 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
963 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
965 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
968 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
969 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
970 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
973 /* (neg (mult A B)) becomes (mult A (neg B)).
974 This works even for floating-point values. */
975 if (GET_CODE (op
) == MULT
976 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
978 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
979 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
982 /* NEG commutes with ASHIFT since it is multiplication. Only do
983 this if we can then eliminate the NEG (e.g., if the operand
985 if (GET_CODE (op
) == ASHIFT
)
987 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
989 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
992 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
993 C is equal to the width of MODE minus 1. */
994 if (GET_CODE (op
) == ASHIFTRT
995 && CONST_INT_P (XEXP (op
, 1))
996 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
997 return simplify_gen_binary (LSHIFTRT
, mode
,
998 XEXP (op
, 0), XEXP (op
, 1));
1000 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1001 C is equal to the width of MODE minus 1. */
1002 if (GET_CODE (op
) == LSHIFTRT
1003 && CONST_INT_P (XEXP (op
, 1))
1004 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1005 return simplify_gen_binary (ASHIFTRT
, mode
,
1006 XEXP (op
, 0), XEXP (op
, 1));
1008 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1009 if (GET_CODE (op
) == XOR
1010 && XEXP (op
, 1) == const1_rtx
1011 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1012 return plus_constant (mode
, XEXP (op
, 0), -1);
1014 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1015 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1016 if (GET_CODE (op
) == LT
1017 && XEXP (op
, 1) == const0_rtx
1018 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
1020 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
1021 int isize
= GET_MODE_PRECISION (inner
);
1022 if (STORE_FLAG_VALUE
== 1)
1024 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1025 GEN_INT (isize
- 1));
1028 if (GET_MODE_PRECISION (mode
) > isize
)
1029 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1030 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1032 else if (STORE_FLAG_VALUE
== -1)
1034 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1035 GEN_INT (isize
- 1));
1038 if (GET_MODE_PRECISION (mode
) > isize
)
1039 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1040 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1046 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1047 with the umulXi3_highpart patterns. */
1048 if (GET_CODE (op
) == LSHIFTRT
1049 && GET_CODE (XEXP (op
, 0)) == MULT
)
1052 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1054 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1056 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1060 /* We can't handle truncation to a partial integer mode here
1061 because we don't know the real bitsize of the partial
1066 if (GET_MODE (op
) != VOIDmode
)
1068 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1073 /* If we know that the value is already truncated, we can
1074 replace the TRUNCATE with a SUBREG. */
1075 if (GET_MODE_NUNITS (mode
) == 1
1076 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1077 || truncated_to_mode (mode
, op
)))
1079 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1084 /* A truncate of a comparison can be replaced with a subreg if
1085 STORE_FLAG_VALUE permits. This is like the previous test,
1086 but it works even if the comparison is done in a mode larger
1087 than HOST_BITS_PER_WIDE_INT. */
1088 if (HWI_COMPUTABLE_MODE_P (mode
)
1089 && COMPARISON_P (op
)
1090 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1092 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1097 /* A truncate of a memory is just loading the low part of the memory
1098 if we are not changing the meaning of the address. */
1099 if (GET_CODE (op
) == MEM
1100 && !VECTOR_MODE_P (mode
)
1101 && !MEM_VOLATILE_P (op
)
1102 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1104 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1111 case FLOAT_TRUNCATE
:
1112 if (DECIMAL_FLOAT_MODE_P (mode
))
1115 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1116 if (GET_CODE (op
) == FLOAT_EXTEND
1117 && GET_MODE (XEXP (op
, 0)) == mode
)
1118 return XEXP (op
, 0);
1120 /* (float_truncate:SF (float_truncate:DF foo:XF))
1121 = (float_truncate:SF foo:XF).
1122 This may eliminate double rounding, so it is unsafe.
1124 (float_truncate:SF (float_extend:XF foo:DF))
1125 = (float_truncate:SF foo:DF).
1127 (float_truncate:DF (float_extend:XF foo:SF))
1128 = (float_extend:SF foo:DF). */
1129 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1130 && flag_unsafe_math_optimizations
)
1131 || GET_CODE (op
) == FLOAT_EXTEND
)
1132 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1134 > GET_MODE_SIZE (mode
)
1135 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1137 XEXP (op
, 0), mode
);
1139 /* (float_truncate (float x)) is (float x) */
1140 if (GET_CODE (op
) == FLOAT
1141 && (flag_unsafe_math_optimizations
1142 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1143 && ((unsigned)significand_size (GET_MODE (op
))
1144 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1145 - num_sign_bit_copies (XEXP (op
, 0),
1146 GET_MODE (XEXP (op
, 0))))))))
1147 return simplify_gen_unary (FLOAT
, mode
,
1149 GET_MODE (XEXP (op
, 0)));
1151 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1152 (OP:SF foo:SF) if OP is NEG or ABS. */
1153 if ((GET_CODE (op
) == ABS
1154 || GET_CODE (op
) == NEG
)
1155 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1156 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1157 return simplify_gen_unary (GET_CODE (op
), mode
,
1158 XEXP (XEXP (op
, 0), 0), mode
);
1160 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1161 is (float_truncate:SF x). */
1162 if (GET_CODE (op
) == SUBREG
1163 && subreg_lowpart_p (op
)
1164 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1165 return SUBREG_REG (op
);
1169 if (DECIMAL_FLOAT_MODE_P (mode
))
1172 /* (float_extend (float_extend x)) is (float_extend x)
1174 (float_extend (float x)) is (float x) assuming that double
1175 rounding can't happen.
1177 if (GET_CODE (op
) == FLOAT_EXTEND
1178 || (GET_CODE (op
) == FLOAT
1179 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1180 && ((unsigned)significand_size (GET_MODE (op
))
1181 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1182 - num_sign_bit_copies (XEXP (op
, 0),
1183 GET_MODE (XEXP (op
, 0)))))))
1184 return simplify_gen_unary (GET_CODE (op
), mode
,
1186 GET_MODE (XEXP (op
, 0)));
1191 /* (abs (neg <foo>)) -> (abs <foo>) */
1192 if (GET_CODE (op
) == NEG
)
1193 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1194 GET_MODE (XEXP (op
, 0)));
1196 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1198 if (GET_MODE (op
) == VOIDmode
)
1201 /* If operand is something known to be positive, ignore the ABS. */
1202 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1203 || val_signbit_known_clear_p (GET_MODE (op
),
1204 nonzero_bits (op
, GET_MODE (op
))))
1207 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1208 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1209 return gen_rtx_NEG (mode
, op
);
1214 /* (ffs (*_extend <X>)) = (ffs <X>) */
1215 if (GET_CODE (op
) == SIGN_EXTEND
1216 || GET_CODE (op
) == ZERO_EXTEND
)
1217 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1218 GET_MODE (XEXP (op
, 0)));
1222 switch (GET_CODE (op
))
1226 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1227 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1228 GET_MODE (XEXP (op
, 0)));
1232 /* Rotations don't affect popcount. */
1233 if (!side_effects_p (XEXP (op
, 1)))
1234 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1235 GET_MODE (XEXP (op
, 0)));
1244 switch (GET_CODE (op
))
1250 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1251 GET_MODE (XEXP (op
, 0)));
1255 /* Rotations don't affect parity. */
1256 if (!side_effects_p (XEXP (op
, 1)))
1257 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1258 GET_MODE (XEXP (op
, 0)));
1267 /* (bswap (bswap x)) -> x. */
1268 if (GET_CODE (op
) == BSWAP
)
1269 return XEXP (op
, 0);
1273 /* (float (sign_extend <X>)) = (float <X>). */
1274 if (GET_CODE (op
) == SIGN_EXTEND
)
1275 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1276 GET_MODE (XEXP (op
, 0)));
1280 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1281 becomes just the MINUS if its mode is MODE. This allows
1282 folding switch statements on machines using casesi (such as
1284 if (GET_CODE (op
) == TRUNCATE
1285 && GET_MODE (XEXP (op
, 0)) == mode
1286 && GET_CODE (XEXP (op
, 0)) == MINUS
1287 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1288 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1289 return XEXP (op
, 0);
1291 /* Extending a widening multiplication should be canonicalized to
1292 a wider widening multiplication. */
1293 if (GET_CODE (op
) == MULT
)
1295 rtx lhs
= XEXP (op
, 0);
1296 rtx rhs
= XEXP (op
, 1);
1297 enum rtx_code lcode
= GET_CODE (lhs
);
1298 enum rtx_code rcode
= GET_CODE (rhs
);
1300 /* Widening multiplies usually extend both operands, but sometimes
1301 they use a shift to extract a portion of a register. */
1302 if ((lcode
== SIGN_EXTEND
1303 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1304 && (rcode
== SIGN_EXTEND
1305 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1307 enum machine_mode lmode
= GET_MODE (lhs
);
1308 enum machine_mode rmode
= GET_MODE (rhs
);
1311 if (lcode
== ASHIFTRT
)
1312 /* Number of bits not shifted off the end. */
1313 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1314 else /* lcode == SIGN_EXTEND */
1315 /* Size of inner mode. */
1316 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1318 if (rcode
== ASHIFTRT
)
1319 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1320 else /* rcode == SIGN_EXTEND */
1321 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1323 /* We can only widen multiplies if the result is mathematiclly
1324 equivalent. I.e. if overflow was impossible. */
1325 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1326 return simplify_gen_binary
1328 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1329 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1333 /* Check for a sign extension of a subreg of a promoted
1334 variable, where the promotion is sign-extended, and the
1335 target mode is the same as the variable's promotion. */
1336 if (GET_CODE (op
) == SUBREG
1337 && SUBREG_PROMOTED_VAR_P (op
)
1338 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1339 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1341 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1346 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1347 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1348 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1350 gcc_assert (GET_MODE_BITSIZE (mode
)
1351 > GET_MODE_BITSIZE (GET_MODE (op
)));
1352 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1353 GET_MODE (XEXP (op
, 0)));
1356 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1357 is (sign_extend:M (subreg:O <X>)) if there is mode with
1358 GET_MODE_BITSIZE (N) - I bits.
1359 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1360 is similarly (zero_extend:M (subreg:O <X>)). */
1361 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1362 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1363 && CONST_INT_P (XEXP (op
, 1))
1364 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1365 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1367 enum machine_mode tmode
1368 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1369 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1370 gcc_assert (GET_MODE_BITSIZE (mode
)
1371 > GET_MODE_BITSIZE (GET_MODE (op
)));
1372 if (tmode
!= BLKmode
)
1375 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1377 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1378 ? SIGN_EXTEND
: ZERO_EXTEND
,
1379 mode
, inner
, tmode
);
1383 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1384 /* As we do not know which address space the pointer is referring to,
1385 we can do this only if the target does not support different pointer
1386 or address modes depending on the address space. */
1387 if (target_default_pointer_address_modes_p ()
1388 && ! POINTERS_EXTEND_UNSIGNED
1389 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1391 || (GET_CODE (op
) == SUBREG
1392 && REG_P (SUBREG_REG (op
))
1393 && REG_POINTER (SUBREG_REG (op
))
1394 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1395 return convert_memory_address (Pmode
, op
);
1400 /* Check for a zero extension of a subreg of a promoted
1401 variable, where the promotion is zero-extended, and the
1402 target mode is the same as the variable's promotion. */
1403 if (GET_CODE (op
) == SUBREG
1404 && SUBREG_PROMOTED_VAR_P (op
)
1405 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
1406 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1408 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1413 /* Extending a widening multiplication should be canonicalized to
1414 a wider widening multiplication. */
1415 if (GET_CODE (op
) == MULT
)
1417 rtx lhs
= XEXP (op
, 0);
1418 rtx rhs
= XEXP (op
, 1);
1419 enum rtx_code lcode
= GET_CODE (lhs
);
1420 enum rtx_code rcode
= GET_CODE (rhs
);
1422 /* Widening multiplies usually extend both operands, but sometimes
1423 they use a shift to extract a portion of a register. */
1424 if ((lcode
== ZERO_EXTEND
1425 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1426 && (rcode
== ZERO_EXTEND
1427 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1429 enum machine_mode lmode
= GET_MODE (lhs
);
1430 enum machine_mode rmode
= GET_MODE (rhs
);
1433 if (lcode
== LSHIFTRT
)
1434 /* Number of bits not shifted off the end. */
1435 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1436 else /* lcode == ZERO_EXTEND */
1437 /* Size of inner mode. */
1438 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1440 if (rcode
== LSHIFTRT
)
1441 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1442 else /* rcode == ZERO_EXTEND */
1443 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1445 /* We can only widen multiplies if the result is mathematiclly
1446 equivalent. I.e. if overflow was impossible. */
1447 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1448 return simplify_gen_binary
1450 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1451 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1455 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1456 if (GET_CODE (op
) == ZERO_EXTEND
)
1457 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1458 GET_MODE (XEXP (op
, 0)));
1460 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1461 is (zero_extend:M (subreg:O <X>)) if there is mode with
1462 GET_MODE_BITSIZE (N) - I bits. */
1463 if (GET_CODE (op
) == LSHIFTRT
1464 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1465 && CONST_INT_P (XEXP (op
, 1))
1466 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1467 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1469 enum machine_mode tmode
1470 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1471 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1472 if (tmode
!= BLKmode
)
1475 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1477 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1481 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1482 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1484 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1485 (and:SI (reg:SI) (const_int 63)). */
1486 if (GET_CODE (op
) == SUBREG
1487 && GET_MODE_PRECISION (GET_MODE (op
))
1488 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1489 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1490 <= HOST_BITS_PER_WIDE_INT
1491 && GET_MODE_PRECISION (mode
)
1492 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1493 && subreg_lowpart_p (op
)
1494 && (nonzero_bits (SUBREG_REG (op
), GET_MODE (SUBREG_REG (op
)))
1495 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1497 if (GET_MODE_PRECISION (mode
)
1498 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
1499 return SUBREG_REG (op
);
1500 return simplify_gen_unary (ZERO_EXTEND
, mode
, SUBREG_REG (op
),
1501 GET_MODE (SUBREG_REG (op
)));
1504 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1505 /* As we do not know which address space the pointer is referring to,
1506 we can do this only if the target does not support different pointer
1507 or address modes depending on the address space. */
1508 if (target_default_pointer_address_modes_p ()
1509 && POINTERS_EXTEND_UNSIGNED
> 0
1510 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1512 || (GET_CODE (op
) == SUBREG
1513 && REG_P (SUBREG_REG (op
))
1514 && REG_POINTER (SUBREG_REG (op
))
1515 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1516 return convert_memory_address (Pmode
, op
);
1527 /* Try to compute the value of a unary operation CODE whose output mode is to
1528 be MODE with input operand OP whose mode was originally OP_MODE.
1529 Return zero if the value cannot be computed. */
1531 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
1532 rtx op
, enum machine_mode op_mode
)
1534 unsigned int width
= GET_MODE_PRECISION (mode
);
1535 unsigned int op_width
= GET_MODE_PRECISION (op_mode
);
1537 if (code
== VEC_DUPLICATE
)
1539 gcc_assert (VECTOR_MODE_P (mode
));
1540 if (GET_MODE (op
) != VOIDmode
)
1542 if (!VECTOR_MODE_P (GET_MODE (op
)))
1543 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1545 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1548 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1549 || GET_CODE (op
) == CONST_VECTOR
)
1551 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1552 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1553 rtvec v
= rtvec_alloc (n_elts
);
1556 if (GET_CODE (op
) != CONST_VECTOR
)
1557 for (i
= 0; i
< n_elts
; i
++)
1558 RTVEC_ELT (v
, i
) = op
;
1561 enum machine_mode inmode
= GET_MODE (op
);
1562 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1563 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1565 gcc_assert (in_n_elts
< n_elts
);
1566 gcc_assert ((n_elts
% in_n_elts
) == 0);
1567 for (i
= 0; i
< n_elts
; i
++)
1568 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1570 return gen_rtx_CONST_VECTOR (mode
, v
);
1574 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1576 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1577 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1578 enum machine_mode opmode
= GET_MODE (op
);
1579 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1580 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1581 rtvec v
= rtvec_alloc (n_elts
);
1584 gcc_assert (op_n_elts
== n_elts
);
1585 for (i
= 0; i
< n_elts
; i
++)
1587 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1588 CONST_VECTOR_ELT (op
, i
),
1589 GET_MODE_INNER (opmode
));
1592 RTVEC_ELT (v
, i
) = x
;
1594 return gen_rtx_CONST_VECTOR (mode
, v
);
1597 /* The order of these tests is critical so that, for example, we don't
1598 check the wrong mode (input vs. output) for a conversion operation,
1599 such as FIX. At some point, this should be simplified. */
1601 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1603 HOST_WIDE_INT hv
, lv
;
1606 if (CONST_INT_P (op
))
1607 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1609 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1611 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1612 d
= real_value_truncate (mode
, d
);
1613 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1615 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1617 HOST_WIDE_INT hv
, lv
;
1620 if (CONST_INT_P (op
))
1621 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1623 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1625 if (op_mode
== VOIDmode
1626 || GET_MODE_PRECISION (op_mode
) > HOST_BITS_PER_DOUBLE_INT
)
1627 /* We should never get a negative number. */
1628 gcc_assert (hv
>= 0);
1629 else if (GET_MODE_PRECISION (op_mode
) <= HOST_BITS_PER_WIDE_INT
)
1630 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1632 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1633 d
= real_value_truncate (mode
, d
);
1634 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1637 if (CONST_INT_P (op
)
1638 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1640 HOST_WIDE_INT arg0
= INTVAL (op
);
1650 val
= - (unsigned HOST_WIDE_INT
) arg0
;
1654 val
= (arg0
>= 0 ? arg0
: - arg0
);
1658 arg0
&= GET_MODE_MASK (mode
);
1659 val
= ffs_hwi (arg0
);
1663 arg0
&= GET_MODE_MASK (mode
);
1664 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1667 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 1;
1671 arg0
&= GET_MODE_MASK (mode
);
1673 val
= GET_MODE_PRECISION (mode
) - 1;
1675 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 2;
1677 val
= GET_MODE_PRECISION (mode
) - floor_log2 (~arg0
) - 2;
1681 arg0
&= GET_MODE_MASK (mode
);
1684 /* Even if the value at zero is undefined, we have to come
1685 up with some replacement. Seems good enough. */
1686 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1687 val
= GET_MODE_PRECISION (mode
);
1690 val
= ctz_hwi (arg0
);
1694 arg0
&= GET_MODE_MASK (mode
);
1697 val
++, arg0
&= arg0
- 1;
1701 arg0
&= GET_MODE_MASK (mode
);
1704 val
++, arg0
&= arg0
- 1;
1713 for (s
= 0; s
< width
; s
+= 8)
1715 unsigned int d
= width
- s
- 8;
1716 unsigned HOST_WIDE_INT byte
;
1717 byte
= (arg0
>> s
) & 0xff;
1728 /* When zero-extending a CONST_INT, we need to know its
1730 gcc_assert (op_mode
!= VOIDmode
);
1731 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1733 /* If we were really extending the mode,
1734 we would have to distinguish between zero-extension
1735 and sign-extension. */
1736 gcc_assert (width
== op_width
);
1739 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1740 val
= arg0
& GET_MODE_MASK (op_mode
);
1746 if (op_mode
== VOIDmode
)
1748 op_width
= GET_MODE_PRECISION (op_mode
);
1749 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1751 /* If we were really extending the mode,
1752 we would have to distinguish between zero-extension
1753 and sign-extension. */
1754 gcc_assert (width
== op_width
);
1757 else if (op_width
< HOST_BITS_PER_WIDE_INT
)
1759 val
= arg0
& GET_MODE_MASK (op_mode
);
1760 if (val_signbit_known_set_p (op_mode
, val
))
1761 val
|= ~GET_MODE_MASK (op_mode
);
1769 case FLOAT_TRUNCATE
:
1781 return gen_int_mode (val
, mode
);
1784 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1785 for a DImode operation on a CONST_INT. */
1786 else if (width
<= HOST_BITS_PER_DOUBLE_INT
1787 && (CONST_DOUBLE_AS_INT_P (op
) || CONST_INT_P (op
)))
1789 double_int first
, value
;
1791 if (CONST_DOUBLE_AS_INT_P (op
))
1792 first
= double_int::from_pair (CONST_DOUBLE_HIGH (op
),
1793 CONST_DOUBLE_LOW (op
));
1795 first
= double_int::from_shwi (INTVAL (op
));
1808 if (first
.is_negative ())
1817 value
.low
= ffs_hwi (first
.low
);
1818 else if (first
.high
!= 0)
1819 value
.low
= HOST_BITS_PER_WIDE_INT
+ ffs_hwi (first
.high
);
1826 if (first
.high
!= 0)
1827 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.high
) - 1
1828 - HOST_BITS_PER_WIDE_INT
;
1829 else if (first
.low
!= 0)
1830 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.low
) - 1;
1831 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1832 value
.low
= GET_MODE_PRECISION (mode
);
1838 value
.low
= ctz_hwi (first
.low
);
1839 else if (first
.high
!= 0)
1840 value
.low
= HOST_BITS_PER_WIDE_INT
+ ctz_hwi (first
.high
);
1841 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1842 value
.low
= GET_MODE_PRECISION (mode
);
1846 value
= double_int_zero
;
1850 first
.low
&= first
.low
- 1;
1855 first
.high
&= first
.high
- 1;
1860 value
= double_int_zero
;
1864 first
.low
&= first
.low
- 1;
1869 first
.high
&= first
.high
- 1;
1878 value
= double_int_zero
;
1879 for (s
= 0; s
< width
; s
+= 8)
1881 unsigned int d
= width
- s
- 8;
1882 unsigned HOST_WIDE_INT byte
;
1884 if (s
< HOST_BITS_PER_WIDE_INT
)
1885 byte
= (first
.low
>> s
) & 0xff;
1887 byte
= (first
.high
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1889 if (d
< HOST_BITS_PER_WIDE_INT
)
1890 value
.low
|= byte
<< d
;
1892 value
.high
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1898 /* This is just a change-of-mode, so do nothing. */
1903 gcc_assert (op_mode
!= VOIDmode
);
1905 if (op_width
> HOST_BITS_PER_WIDE_INT
)
1908 value
= double_int::from_uhwi (first
.low
& GET_MODE_MASK (op_mode
));
1912 if (op_mode
== VOIDmode
1913 || op_width
> HOST_BITS_PER_WIDE_INT
)
1917 value
.low
= first
.low
& GET_MODE_MASK (op_mode
);
1918 if (val_signbit_known_set_p (op_mode
, value
.low
))
1919 value
.low
|= ~GET_MODE_MASK (op_mode
);
1921 value
.high
= HWI_SIGN_EXTEND (value
.low
);
1932 return immed_double_int_const (value
, mode
);
1935 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1936 && SCALAR_FLOAT_MODE_P (mode
)
1937 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1940 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1947 d
= real_value_abs (&d
);
1950 d
= real_value_negate (&d
);
1952 case FLOAT_TRUNCATE
:
1953 d
= real_value_truncate (mode
, d
);
1956 /* All this does is change the mode, unless changing
1958 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1959 real_convert (&d
, mode
, &d
);
1962 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1969 real_to_target (tmp
, &d
, GET_MODE (op
));
1970 for (i
= 0; i
< 4; i
++)
1972 real_from_target (&d
, tmp
, mode
);
1978 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1981 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1982 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1983 && GET_MODE_CLASS (mode
) == MODE_INT
1984 && width
<= HOST_BITS_PER_DOUBLE_INT
&& width
> 0)
1986 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1987 operators are intentionally left unspecified (to ease implementation
1988 by target backends), for consistency, this routine implements the
1989 same semantics for constant folding as used by the middle-end. */
1991 /* This was formerly used only for non-IEEE float.
1992 eggert@twinsun.com says it is safe for IEEE also. */
1993 HOST_WIDE_INT xh
, xl
, th
, tl
;
1994 REAL_VALUE_TYPE x
, t
;
1995 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1999 if (REAL_VALUE_ISNAN (x
))
2002 /* Test against the signed upper bound. */
2003 if (width
> HOST_BITS_PER_WIDE_INT
)
2005 th
= ((unsigned HOST_WIDE_INT
) 1
2006 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
2012 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
2014 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
2015 if (REAL_VALUES_LESS (t
, x
))
2022 /* Test against the signed lower bound. */
2023 if (width
> HOST_BITS_PER_WIDE_INT
)
2025 th
= HOST_WIDE_INT_M1U
<< (width
- HOST_BITS_PER_WIDE_INT
- 1);
2031 tl
= HOST_WIDE_INT_M1U
<< (width
- 1);
2033 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
2034 if (REAL_VALUES_LESS (x
, t
))
2040 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2044 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
2047 /* Test against the unsigned upper bound. */
2048 if (width
== HOST_BITS_PER_DOUBLE_INT
)
2053 else if (width
>= HOST_BITS_PER_WIDE_INT
)
2055 th
= ((unsigned HOST_WIDE_INT
) 1
2056 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
2062 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
2064 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
2065 if (REAL_VALUES_LESS (t
, x
))
2072 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2078 return immed_double_const (xl
, xh
, mode
);
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
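
/* Illustrative sketch (added for exposition; not part of the original
   source): with CODE == AND, MODE == SImode, OP0 == (bswap:SI x) and
   OP1 == (const_int 0xff), the first rule above rewrites the pair as

     (bswap:SI (and:SI x (const_int 0xff000000)))

   i.e. the mask constant is byte-swapped so the AND can commute past the
   BSWAP.  */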
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
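/* For example, with the canonicalization above, (plus (plus x (const_int 4)) y)
   is rewritten as (plus (plus x y) (const_int 4)), which keeps the constant
   on the outside where later folding can combine it with other constants.  */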
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
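/* For instance, simplify_binary_operation (PLUS, SImode, x, const0_rtx)
   returns X itself via the PLUS handling below, and returns 0 (rather than
   a no-op rtx) whenever nothing about the expression can be improved.  */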
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
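      /* The (~a) + 1 -> -a rule is the two's complement identity: e.g. for
	 a = 5 in SImode, ~5 is -6 and -6 + 1 is -5, i.e. the negation of a.  */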
      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));
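      /* For example, (plus (symbol_ref "foo") (const_int 8)) is folded by
	 plus_constant into the canonical (const (plus (symbol_ref "foo")
	 (const_int 8))) form that assemblers can relocate.  */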
2259 /* See if this is something like X * C - X or vice versa or
2260 if the multiplication is written as a shift. If so, we can
2261 distribute and make a new multiply, shift, or maybe just
2262 have X (if C is 2 in the example above). But don't make
2263 something more expensive than we had before. */
2265 if (SCALAR_INT_MODE_P (mode
))
2267 double_int coeff0
, coeff1
;
2268 rtx lhs
= op0
, rhs
= op1
;
2270 coeff0
= double_int_one
;
2271 coeff1
= double_int_one
;
2273 if (GET_CODE (lhs
) == NEG
)
2275 coeff0
= double_int_minus_one
;
2276 lhs
= XEXP (lhs
, 0);
2278 else if (GET_CODE (lhs
) == MULT
2279 && CONST_INT_P (XEXP (lhs
, 1)))
2281 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2282 lhs
= XEXP (lhs
, 0);
2284 else if (GET_CODE (lhs
) == ASHIFT
2285 && CONST_INT_P (XEXP (lhs
, 1))
2286 && INTVAL (XEXP (lhs
, 1)) >= 0
2287 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2289 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2290 lhs
= XEXP (lhs
, 0);
2293 if (GET_CODE (rhs
) == NEG
)
2295 coeff1
= double_int_minus_one
;
2296 rhs
= XEXP (rhs
, 0);
2298 else if (GET_CODE (rhs
) == MULT
2299 && CONST_INT_P (XEXP (rhs
, 1)))
2301 coeff1
= double_int::from_shwi (INTVAL (XEXP (rhs
, 1)));
2302 rhs
= XEXP (rhs
, 0);
2304 else if (GET_CODE (rhs
) == ASHIFT
2305 && CONST_INT_P (XEXP (rhs
, 1))
2306 && INTVAL (XEXP (rhs
, 1)) >= 0
2307 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2309 coeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2310 rhs
= XEXP (rhs
, 0);
2313 if (rtx_equal_p (lhs
, rhs
))
2315 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2318 bool speed
= optimize_function_for_speed_p (cfun
);
2320 val
= coeff0
+ coeff1
;
2321 coeff
= immed_double_int_const (val
, mode
);
2323 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2324 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
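      /* The signbit case is safe because adding the sign bit and XORing the
	 sign bit agree modulo the mode: e.g. in QImode, C2 = 0x80, so
	 (plus (xor x (const_int 0x12)) (const_int 0x80)) becomes
	 (xor x (const_int 0x92)); the only possible carry is out of bit 7
	 and is discarded.  */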
2338 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2339 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2340 && GET_CODE (op0
) == MULT
2341 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2345 in1
= XEXP (XEXP (op0
, 0), 0);
2346 in2
= XEXP (op0
, 1);
2347 return simplify_gen_binary (MINUS
, mode
, op1
,
2348 simplify_gen_binary (MULT
, mode
,
2352 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2353 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2355 if (COMPARISON_P (op0
)
2356 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2357 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2358 && (reversed
= reversed_comparison (op0
, mode
)))
2360 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2362 /* If one of the operands is a PLUS or a MINUS, see if we can
2363 simplify this by the associative law.
2364 Don't use the associative law for floating point.
2365 The inaccuracy makes it nonassociative,
2366 and subtle programs can break if operations are associated. */
2368 if (INTEGRAL_MODE_P (mode
)
2369 && (plus_minus_operand_p (op0
)
2370 || plus_minus_operand_p (op1
))
2371 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2374 /* Reassociate floating point addition only when the user
2375 specifies associative math operations. */
2376 if (FLOAT_MODE_P (mode
)
2377 && flag_associative_math
)
2379 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2386 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2387 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2388 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2389 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2391 rtx xop00
= XEXP (op0
, 0);
2392 rtx xop10
= XEXP (op1
, 0);
2395 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2397 if (REG_P (xop00
) && REG_P (xop10
)
2398 && GET_MODE (xop00
) == GET_MODE (xop10
)
2399 && REGNO (xop00
) == REGNO (xop10
)
2400 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2401 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
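      /* Signed zeros are what block the 0 - x -> -x rewrite above: with
	 x = +0.0 and round-to-nearest, 0.0 - 0.0 evaluates to +0.0 while -x
	 is -0.0, so the transformation is only done when HONOR_SIGNED_ZEROS
	 is false for the mode.  */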
2435 /* See if this is something like X * C - X or vice versa or
2436 if the multiplication is written as a shift. If so, we can
2437 distribute and make a new multiply, shift, or maybe just
2438 have X (if C is 2 in the example above). But don't make
2439 something more expensive than we had before. */
2441 if (SCALAR_INT_MODE_P (mode
))
2443 double_int coeff0
, negcoeff1
;
2444 rtx lhs
= op0
, rhs
= op1
;
2446 coeff0
= double_int_one
;
2447 negcoeff1
= double_int_minus_one
;
2449 if (GET_CODE (lhs
) == NEG
)
2451 coeff0
= double_int_minus_one
;
2452 lhs
= XEXP (lhs
, 0);
2454 else if (GET_CODE (lhs
) == MULT
2455 && CONST_INT_P (XEXP (lhs
, 1)))
2457 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2458 lhs
= XEXP (lhs
, 0);
2460 else if (GET_CODE (lhs
) == ASHIFT
2461 && CONST_INT_P (XEXP (lhs
, 1))
2462 && INTVAL (XEXP (lhs
, 1)) >= 0
2463 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2465 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2466 lhs
= XEXP (lhs
, 0);
2469 if (GET_CODE (rhs
) == NEG
)
2471 negcoeff1
= double_int_one
;
2472 rhs
= XEXP (rhs
, 0);
2474 else if (GET_CODE (rhs
) == MULT
2475 && CONST_INT_P (XEXP (rhs
, 1)))
2477 negcoeff1
= double_int::from_shwi (-INTVAL (XEXP (rhs
, 1)));
2478 rhs
= XEXP (rhs
, 0);
2480 else if (GET_CODE (rhs
) == ASHIFT
2481 && CONST_INT_P (XEXP (rhs
, 1))
2482 && INTVAL (XEXP (rhs
, 1)) >= 0
2483 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2485 negcoeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2486 negcoeff1
= -negcoeff1
;
2487 rhs
= XEXP (rhs
, 0);
2490 if (rtx_equal_p (lhs
, rhs
))
2492 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2495 bool speed
= optimize_function_for_speed_p (cfun
);
2497 val
= coeff0
+ negcoeff1
;
2498 coeff
= immed_double_int_const (val
, mode
);
2500 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2501 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
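      /* A quick check of the (x - (x & y)) -> (x & ~y) identity: the AND
	 keeps exactly the bits of x that are also set in y, so subtracting
	 it never borrows and simply clears those bits, which is what x & ~y
	 computes.  E.g. x = 0b1101, y = 0b1011: x & y = 0b1001, and
	 0b1101 - 0b1001 = 0b0100 = x & ~y.  */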
2542 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2543 by reversing the comparison code if valid. */
2544 if (STORE_FLAG_VALUE
== 1
2545 && trueop0
== const1_rtx
2546 && COMPARISON_P (op1
)
2547 && (reversed
= reversed_comparison (op1
, mode
)))
2550 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2551 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2552 && GET_CODE (op1
) == MULT
2553 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2557 in1
= XEXP (XEXP (op1
, 0), 0);
2558 in2
= XEXP (op1
, 1);
2559 return simplify_gen_binary (PLUS
, mode
,
2560 simplify_gen_binary (MULT
, mode
,
2565 /* Canonicalize (minus (neg A) (mult B C)) to
2566 (minus (mult (neg B) C) A). */
2567 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2568 && GET_CODE (op1
) == MULT
2569 && GET_CODE (op0
) == NEG
)
2573 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2574 in2
= XEXP (op1
, 1);
2575 return simplify_gen_binary (MINUS
, mode
,
2576 simplify_gen_binary (MULT
, mode
,
2581 /* If one of the operands is a PLUS or a MINUS, see if we can
2582 simplify this by the associative law. This will, for example,
2583 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2584 Don't use the associative law for floating point.
2585 The inaccuracy makes it nonassociative,
2586 and subtle programs can break if operations are associated. */
2588 if (INTEGRAL_MODE_P (mode
)
2589 && (plus_minus_operand_p (op0
)
2590 || plus_minus_operand_p (op1
))
2591 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2596 if (trueop1
== constm1_rtx
)
2597 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2599 if (GET_CODE (op0
) == NEG
)
2601 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2602 /* If op1 is a MULT as well and simplify_unary_operation
2603 just moved the NEG to the second operand, simplify_gen_binary
2604 below could through simplify_associative_operation move
2605 the NEG around again and recurse endlessly. */
2607 && GET_CODE (op1
) == MULT
2608 && GET_CODE (temp
) == MULT
2609 && XEXP (op1
, 0) == XEXP (temp
, 0)
2610 && GET_CODE (XEXP (temp
, 1)) == NEG
2611 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2614 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2616 if (GET_CODE (op1
) == NEG
)
2618 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2619 /* If op0 is a MULT as well and simplify_unary_operation
2620 just moved the NEG to the second operand, simplify_gen_binary
2621 below could through simplify_associative_operation move
2622 the NEG around again and recurse endlessly. */
2624 && GET_CODE (op0
) == MULT
2625 && GET_CODE (temp
) == MULT
2626 && XEXP (op0
, 0) == XEXP (temp
, 0)
2627 && GET_CODE (XEXP (temp
, 1)) == NEG
2628 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2631 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
    case MULT:
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
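      /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)), since
	 exact_log2 (8) is 3.  The val != HOST_BITS_PER_WIDE_INT - 1 test
	 rejects a CONST_INT whose only set bit is the host sign bit, which
	 sign-extends to a negative value in a wider mode and so is not a
	 power of two there.  */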
2661 /* Likewise for multipliers wider than a word. */
2662 if (CONST_DOUBLE_AS_INT_P (trueop1
)
2663 && GET_MODE (op0
) == mode
2664 && CONST_DOUBLE_LOW (trueop1
) == 0
2665 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0
2666 && (val
< HOST_BITS_PER_DOUBLE_INT
- 1
2667 || GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_DOUBLE_INT
))
2668 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2669 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
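      /* Folding x * 2.0 into x + x is exact in binary floating point, and
	 x * -1.0 -> -x only flips the sign bit; the -1.0 case is still
	 guarded by HONOR_SNANS because a plain negation would not raise the
	 invalid exception that multiplying a signalling NaN by -1.0 does.  */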
2716 if (trueop1
== CONST0_RTX (mode
))
2718 if (INTEGRAL_MODE_P (mode
)
2719 && trueop1
== CONSTM1_RTX (mode
)
2720 && !side_effects_p (op0
))
2722 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2724 /* A | (~A) -> -1 */
2725 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2726 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2727 && ! side_effects_p (op0
)
2728 && SCALAR_INT_MODE_P (mode
))
2731 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2732 if (CONST_INT_P (op1
)
2733 && HWI_COMPUTABLE_MODE_P (mode
)
2734 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2735 && !side_effects_p (op0
))
2738 /* Canonicalize (X & C1) | C2. */
2739 if (GET_CODE (op0
) == AND
2740 && CONST_INT_P (trueop1
)
2741 && CONST_INT_P (XEXP (op0
, 1)))
2743 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2744 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2745 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2747 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2749 && !side_effects_p (XEXP (op0
, 0)))
2752 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2753 if (((c1
|c2
) & mask
) == mask
)
2754 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2756 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2757 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2759 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2760 gen_int_mode (c1
& ~c2
, mode
));
2761 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
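      /* A worked instance of the canonicalization above, with C1 = 0x0f and
	 C2 = 0x05: (ior (and x (const_int 0x0f)) (const_int 0x05)) keeps
	 only the bits of C1 not already forced by C2, giving
	 (ior (and x (const_int 0x0a)) (const_int 0x05)); both forms agree
	 on every bit position.  */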
2765 /* Convert (A & B) | A to A. */
2766 if (GET_CODE (op0
) == AND
2767 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2768 || rtx_equal_p (XEXP (op0
, 1), op1
))
2769 && ! side_effects_p (XEXP (op0
, 0))
2770 && ! side_effects_p (XEXP (op0
, 1)))
2773 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2774 mode size to (rotate A CX). */
2776 if (GET_CODE (op1
) == ASHIFT
2777 || GET_CODE (op1
) == SUBREG
)
2788 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2789 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2790 && CONST_INT_P (XEXP (opleft
, 1))
2791 && CONST_INT_P (XEXP (opright
, 1))
2792 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2793 == GET_MODE_PRECISION (mode
)))
2794 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2796 /* Same, but for ashift that has been "simplified" to a wider mode
2797 by simplify_shift_const. */
2799 if (GET_CODE (opleft
) == SUBREG
2800 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2801 && GET_CODE (opright
) == LSHIFTRT
2802 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2803 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2804 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2805 && (GET_MODE_SIZE (GET_MODE (opleft
))
2806 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2807 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2808 SUBREG_REG (XEXP (opright
, 0)))
2809 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2810 && CONST_INT_P (XEXP (opright
, 1))
2811 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2812 == GET_MODE_PRECISION (mode
)))
2813 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2814 XEXP (SUBREG_REG (opleft
), 1));
2816 /* If we have (ior (and (X C1) C2)), simplify this by making
2817 C1 as small as possible if C1 actually changes. */
2818 if (CONST_INT_P (op1
)
2819 && (HWI_COMPUTABLE_MODE_P (mode
)
2820 || INTVAL (op1
) > 0)
2821 && GET_CODE (op0
) == AND
2822 && CONST_INT_P (XEXP (op0
, 1))
2823 && CONST_INT_P (op1
)
2824 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2826 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2827 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2830 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2833 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2834 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2835 the PLUS does not affect any of the bits in OP1: then we can do
2836 the IOR as a PLUS and we can associate. This is valid if OP1
2837 can be safely shifted left C bits. */
2838 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2839 && GET_CODE (XEXP (op0
, 0)) == PLUS
2840 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2841 && CONST_INT_P (XEXP (op0
, 1))
2842 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2844 int count
= INTVAL (XEXP (op0
, 1));
2845 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2847 if (mask
>> count
== INTVAL (trueop1
)
2848 && trunc_int_for_mode (mask
, mode
) == mask
2849 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2850 return simplify_gen_binary (ASHIFTRT
, mode
,
2851 plus_constant (mode
, XEXP (op0
, 0),
2856 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2860 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2866 if (trueop1
== CONST0_RTX (mode
))
2868 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2869 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2870 if (rtx_equal_p (trueop0
, trueop1
)
2871 && ! side_effects_p (op0
)
2872 && GET_MODE_CLASS (mode
) != MODE_CC
)
2873 return CONST0_RTX (mode
);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
2888 /* If we are XORing two things that have no bits in common,
2889 convert them into an IOR. This helps to detect rotation encoded
2890 using those methods and possibly other simplifications. */
2892 if (HWI_COMPUTABLE_MODE_P (mode
)
2893 && (nonzero_bits (op0
, mode
)
2894 & nonzero_bits (op1
, mode
)) == 0)
2895 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2897 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2898 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2901 int num_negated
= 0;
2903 if (GET_CODE (op0
) == NOT
)
2904 num_negated
++, op0
= XEXP (op0
, 0);
2905 if (GET_CODE (op1
) == NOT
)
2906 num_negated
++, op1
= XEXP (op1
, 0);
2908 if (num_negated
== 2)
2909 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2910 else if (num_negated
== 1)
2911 return simplify_gen_unary (NOT
, mode
,
2912 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2916 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2917 correspond to a machine insn or result in further simplifications
2918 if B is a constant. */
2920 if (GET_CODE (op0
) == AND
2921 && rtx_equal_p (XEXP (op0
, 1), op1
)
2922 && ! side_effects_p (op1
))
2923 return simplify_gen_binary (AND
, mode
,
2924 simplify_gen_unary (NOT
, mode
,
2925 XEXP (op0
, 0), mode
),
2928 else if (GET_CODE (op0
) == AND
2929 && rtx_equal_p (XEXP (op0
, 0), op1
)
2930 && ! side_effects_p (op1
))
2931 return simplify_gen_binary (AND
, mode
,
2932 simplify_gen_unary (NOT
, mode
,
2933 XEXP (op0
, 1), mode
),
2936 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2937 we can transform like this:
2938 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2939 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2940 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2941 Attempt a few simplifications when B and C are both constants. */
2942 if (GET_CODE (op0
) == AND
2943 && CONST_INT_P (op1
)
2944 && CONST_INT_P (XEXP (op0
, 1)))
2946 rtx a
= XEXP (op0
, 0);
2947 rtx b
= XEXP (op0
, 1);
2949 HOST_WIDE_INT bval
= INTVAL (b
);
2950 HOST_WIDE_INT cval
= INTVAL (c
);
2953 = simplify_binary_operation (AND
, mode
,
2954 simplify_gen_unary (NOT
, mode
, a
, mode
),
2956 if ((~cval
& bval
) == 0)
2958 /* Try to simplify ~A&C | ~B&C. */
2959 if (na_c
!= NULL_RTX
)
2960 return simplify_gen_binary (IOR
, mode
, na_c
,
2961 gen_int_mode (~bval
& cval
, mode
));
2965 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2966 if (na_c
== const0_rtx
)
2968 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2969 gen_int_mode (~cval
& bval
,
2971 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2972 gen_int_mode (~bval
& cval
,
2978 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2979 comparison if STORE_FLAG_VALUE is 1. */
2980 if (STORE_FLAG_VALUE
== 1
2981 && trueop1
== const1_rtx
2982 && COMPARISON_P (op0
)
2983 && (reversed
= reversed_comparison (op0
, mode
)))
2986 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2987 is (lt foo (const_int 0)), so we can perform the above
2988 simplification if STORE_FLAG_VALUE is 1. */
2990 if (STORE_FLAG_VALUE
== 1
2991 && trueop1
== const1_rtx
2992 && GET_CODE (op0
) == LSHIFTRT
2993 && CONST_INT_P (XEXP (op0
, 1))
2994 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2995 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
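      /* For example, with STORE_FLAG_VALUE == 1 in SImode,
	 (xor (lshiftrt x (const_int 31)) (const_int 1)) tests the inverted
	 sign bit of x, which is exactly (ge x (const_int 0)).  */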
2997 /* (xor (comparison foo bar) (const_int sign-bit))
2998 when STORE_FLAG_VALUE is the sign bit. */
2999 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
3000 && trueop1
== const_true_rtx
3001 && COMPARISON_P (op0
)
3002 && (reversed
= reversed_comparison (op0
, mode
)))
3005 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3009 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3015 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3017 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3019 if (HWI_COMPUTABLE_MODE_P (mode
))
3021 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3022 HOST_WIDE_INT nzop1
;
3023 if (CONST_INT_P (trueop1
))
3025 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3026 /* If we are turning off bits already known off in OP0, we need
3028 if ((nzop0
& ~val1
) == 0)
3031 nzop1
= nonzero_bits (trueop1
, mode
);
3032 /* If we are clearing all the nonzero bits, the result is zero. */
3033 if ((nzop1
& nzop0
) == 0
3034 && !side_effects_p (op0
) && !side_effects_p (op1
))
3035 return CONST0_RTX (mode
);
3037 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3038 && GET_MODE_CLASS (mode
) != MODE_CC
)
3041 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3042 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3043 && ! side_effects_p (op0
)
3044 && GET_MODE_CLASS (mode
) != MODE_CC
)
3045 return CONST0_RTX (mode
);
3047 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3048 there are no nonzero bits of C outside of X's mode. */
3049 if ((GET_CODE (op0
) == SIGN_EXTEND
3050 || GET_CODE (op0
) == ZERO_EXTEND
)
3051 && CONST_INT_P (trueop1
)
3052 && HWI_COMPUTABLE_MODE_P (mode
)
3053 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3054 & UINTVAL (trueop1
)) == 0)
3056 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3057 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3058 gen_int_mode (INTVAL (trueop1
),
3060 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3063 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3064 we might be able to further simplify the AND with X and potentially
3065 remove the truncation altogether. */
3066 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3068 rtx x
= XEXP (op0
, 0);
3069 enum machine_mode xmode
= GET_MODE (x
);
3070 tem
= simplify_gen_binary (AND
, xmode
, x
,
3071 gen_int_mode (INTVAL (trueop1
), xmode
));
3072 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3075 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3076 if (GET_CODE (op0
) == IOR
3077 && CONST_INT_P (trueop1
)
3078 && CONST_INT_P (XEXP (op0
, 1)))
3080 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3081 return simplify_gen_binary (IOR
, mode
,
3082 simplify_gen_binary (AND
, mode
,
3083 XEXP (op0
, 0), op1
),
3084 gen_int_mode (tmp
, mode
));
3087 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3088 insn (and may simplify more). */
3089 if (GET_CODE (op0
) == XOR
3090 && rtx_equal_p (XEXP (op0
, 0), op1
)
3091 && ! side_effects_p (op1
))
3092 return simplify_gen_binary (AND
, mode
,
3093 simplify_gen_unary (NOT
, mode
,
3094 XEXP (op0
, 1), mode
),
3097 if (GET_CODE (op0
) == XOR
3098 && rtx_equal_p (XEXP (op0
, 1), op1
)
3099 && ! side_effects_p (op1
))
3100 return simplify_gen_binary (AND
, mode
,
3101 simplify_gen_unary (NOT
, mode
,
3102 XEXP (op0
, 0), mode
),
3105 /* Similarly for (~(A ^ B)) & A. */
3106 if (GET_CODE (op0
) == NOT
3107 && GET_CODE (XEXP (op0
, 0)) == XOR
3108 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3109 && ! side_effects_p (op1
))
3110 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3112 if (GET_CODE (op0
) == NOT
3113 && GET_CODE (XEXP (op0
, 0)) == XOR
3114 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3115 && ! side_effects_p (op1
))
3116 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3118 /* Convert (A | B) & A to A. */
3119 if (GET_CODE (op0
) == IOR
3120 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3121 || rtx_equal_p (XEXP (op0
, 1), op1
))
3122 && ! side_effects_p (XEXP (op0
, 0))
3123 && ! side_effects_p (XEXP (op0
, 1)))
3126 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3127 ((A & N) + B) & M -> (A + B) & M
3128 Similarly if (N & M) == 0,
3129 ((A | N) + B) & M -> (A + B) & M
3130 and for - instead of + and/or ^ instead of |.
3131 Also, if (N & M) == 0, then
3132 (A +- N) & M -> A & M. */
3133 if (CONST_INT_P (trueop1
)
3134 && HWI_COMPUTABLE_MODE_P (mode
)
3135 && ~UINTVAL (trueop1
)
3136 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3137 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3142 pmop
[0] = XEXP (op0
, 0);
3143 pmop
[1] = XEXP (op0
, 1);
3145 if (CONST_INT_P (pmop
[1])
3146 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3147 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3149 for (which
= 0; which
< 2; which
++)
3152 switch (GET_CODE (tem
))
3155 if (CONST_INT_P (XEXP (tem
, 1))
3156 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3157 == UINTVAL (trueop1
))
3158 pmop
[which
] = XEXP (tem
, 0);
3162 if (CONST_INT_P (XEXP (tem
, 1))
3163 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3164 pmop
[which
] = XEXP (tem
, 0);
3171 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3173 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3175 return simplify_gen_binary (code
, mode
, tem
, op1
);
3179 /* (and X (ior (not X) Y) -> (and X Y) */
3180 if (GET_CODE (op1
) == IOR
3181 && GET_CODE (XEXP (op1
, 0)) == NOT
3182 && op0
== XEXP (XEXP (op1
, 0), 0))
3183 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3185 /* (and (ior (not X) Y) X) -> (and X Y) */
3186 if (GET_CODE (op0
) == IOR
3187 && GET_CODE (XEXP (op0
, 0)) == NOT
3188 && op1
== XEXP (XEXP (op0
, 0), 0))
3189 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3191 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3195 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
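      /* E.g. an unsigned divide (udiv x (const_int 16)) becomes
	 (lshiftrt x (const_int 4)); the > 0 test leaves division by 1,
	 where exact_log2 is 0, to the x/1 case above.  */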
3222 /* Handle floating point and integers separately. */
3223 if (SCALAR_FLOAT_MODE_P (mode
))
3225 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3226 safe for modes with NaNs, since 0.0 / 0.0 will then be
3227 NaN rather than 0.0. Nor is it safe for modes with signed
3228 zeros, since dividing 0 by a negative number gives -0.0 */
3229 if (trueop0
== CONST0_RTX (mode
)
3230 && !HONOR_NANS (mode
)
3231 && !HONOR_SIGNED_ZEROS (mode
)
3232 && ! side_effects_p (op1
))
3235 if (trueop1
== CONST1_RTX (mode
)
3236 && !HONOR_SNANS (mode
))
3239 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3240 && trueop1
!= CONST0_RTX (mode
))
3243 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3246 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3247 && !HONOR_SNANS (mode
))
3248 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3250 /* Change FP division by a constant into multiplication.
3251 Only do this with -freciprocal-math. */
3252 if (flag_reciprocal_math
3253 && !REAL_VALUES_EQUAL (d
, dconst0
))
3255 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3256 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3257 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3261 else if (SCALAR_INT_MODE_P (mode
))
3263 /* 0/x is 0 (or x&0 if x has side-effects). */
3264 if (trueop0
== CONST0_RTX (mode
)
3265 && !cfun
->can_throw_non_call_exceptions
)
3267 if (side_effects_p (op1
))
3268 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3272 if (trueop1
== CONST1_RTX (mode
))
3274 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3279 if (trueop1
== constm1_rtx
)
3281 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3283 return simplify_gen_unary (NEG
, mode
, x
, mode
);
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
      break;
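      /* E.g. an unsigned modulus (umod x (const_int 8)) becomes
	 (and x (const_int 7)), since for a power-of-two divisor the
	 remainder is just the low bits.  */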
3311 /* 0%x is 0 (or x&0 if x has side-effects). */
3312 if (trueop0
== CONST0_RTX (mode
))
3314 if (side_effects_p (op1
))
3315 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3318 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3319 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3321 if (side_effects_p (op0
))
3322 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3323 return CONST0_RTX (mode
);
3329 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3330 prefer left rotation, if op1 is from bitsize / 2 + 1 to
3331 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
3333 if (CONST_INT_P (trueop1
)
3334 && IN_RANGE (INTVAL (trueop1
),
3335 GET_MODE_BITSIZE (mode
) / 2 + (code
== ROTATE
),
3336 GET_MODE_BITSIZE (mode
) - 1))
3337 return simplify_gen_binary (code
== ROTATE
? ROTATERT
: ROTATE
,
3338 mode
, op0
, GEN_INT (GET_MODE_BITSIZE (mode
)
3339 - INTVAL (trueop1
)));
3342 if (trueop1
== CONST0_RTX (mode
))
3344 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3346 /* Rotating ~0 always results in ~0. */
3347 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3348 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3349 && ! side_effects_p (op1
))
3352 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3354 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
3355 if (val
!= INTVAL (op1
))
3356 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3363 if (trueop1
== CONST0_RTX (mode
))
3365 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3367 goto canonicalize_shift
;
3370 if (trueop1
== CONST0_RTX (mode
))
3372 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3374 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3375 if (GET_CODE (op0
) == CLZ
3376 && CONST_INT_P (trueop1
)
3377 && STORE_FLAG_VALUE
== 1
3378 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3380 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3381 unsigned HOST_WIDE_INT zero_val
= 0;
3383 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3384 && zero_val
== GET_MODE_PRECISION (imode
)
3385 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3386 return simplify_gen_relational (EQ
, mode
, imode
,
3387 XEXP (op0
, 0), const0_rtx
);
3389 goto canonicalize_shift
;
3392 if (width
<= HOST_BITS_PER_WIDE_INT
3393 && mode_signbit_p (mode
, trueop1
)
3394 && ! side_effects_p (op0
))
3396 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3398 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3404 if (width
<= HOST_BITS_PER_WIDE_INT
3405 && CONST_INT_P (trueop1
)
3406 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3407 && ! side_effects_p (op0
))
3409 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3411 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3417 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3419 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3421 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3427 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3429 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3431 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3444 /* ??? There are simplifications that can be done. */
3448 if (!VECTOR_MODE_P (mode
))
3450 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3451 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3452 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3453 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3454 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3456 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3457 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3460 /* Extract a scalar element from a nested VEC_SELECT expression
3461 (with optional nested VEC_CONCAT expression). Some targets
3462 (i386) extract scalar element from a vector using chain of
3463 nested VEC_SELECT expressions. When input operand is a memory
3464 operand, this operation can be simplified to a simple scalar
3465 load from an offseted memory address. */
3466 if (GET_CODE (trueop0
) == VEC_SELECT
)
3468 rtx op0
= XEXP (trueop0
, 0);
3469 rtx op1
= XEXP (trueop0
, 1);
3471 enum machine_mode opmode
= GET_MODE (op0
);
3472 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3473 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3475 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3481 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3482 gcc_assert (i
< n_elts
);
3484 /* Select element, pointed by nested selector. */
3485 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3487 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3488 if (GET_CODE (op0
) == VEC_CONCAT
)
3490 rtx op00
= XEXP (op0
, 0);
3491 rtx op01
= XEXP (op0
, 1);
3493 enum machine_mode mode00
, mode01
;
3494 int n_elts00
, n_elts01
;
3496 mode00
= GET_MODE (op00
);
3497 mode01
= GET_MODE (op01
);
3499 /* Find out number of elements of each operand. */
3500 if (VECTOR_MODE_P (mode00
))
3502 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3503 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3508 if (VECTOR_MODE_P (mode01
))
3510 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3511 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3516 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3518 /* Select correct operand of VEC_CONCAT
3519 and adjust selector. */
3520 if (elem
< n_elts01
)
3531 vec
= rtvec_alloc (1);
3532 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3534 tmp
= gen_rtx_fmt_ee (code
, mode
,
3535 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3538 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3539 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3540 return XEXP (trueop0
, 0);
3544 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3545 gcc_assert (GET_MODE_INNER (mode
)
3546 == GET_MODE_INNER (GET_MODE (trueop0
)));
3547 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3549 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3551 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3552 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3553 rtvec v
= rtvec_alloc (n_elts
);
3556 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3557 for (i
= 0; i
< n_elts
; i
++)
3559 rtx x
= XVECEXP (trueop1
, 0, i
);
3561 gcc_assert (CONST_INT_P (x
));
3562 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3566 return gen_rtx_CONST_VECTOR (mode
, v
);
3569 /* Recognize the identity. */
3570 if (GET_MODE (trueop0
) == mode
)
3572 bool maybe_ident
= true;
3573 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3575 rtx j
= XVECEXP (trueop1
, 0, i
);
3576 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3578 maybe_ident
= false;
3586 /* If we build {a,b} then permute it, build the result directly. */
3587 if (XVECLEN (trueop1
, 0) == 2
3588 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3589 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3590 && GET_CODE (trueop0
) == VEC_CONCAT
3591 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3592 && GET_MODE (XEXP (trueop0
, 0)) == mode
3593 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3594 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3596 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3597 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3600 gcc_assert (i0
< 4 && i1
< 4);
3601 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3602 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3604 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3607 if (XVECLEN (trueop1
, 0) == 2
3608 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3609 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3610 && GET_CODE (trueop0
) == VEC_CONCAT
3611 && GET_MODE (trueop0
) == mode
)
3613 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3614 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3617 gcc_assert (i0
< 2 && i1
< 2);
3618 subop0
= XEXP (trueop0
, i0
);
3619 subop1
= XEXP (trueop0
, i1
);
3621 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3625 if (XVECLEN (trueop1
, 0) == 1
3626 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3627 && GET_CODE (trueop0
) == VEC_CONCAT
)
3630 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3632 /* Try to find the element in the VEC_CONCAT. */
3633 while (GET_MODE (vec
) != mode
3634 && GET_CODE (vec
) == VEC_CONCAT
)
3636 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3637 if (offset
< vec_size
)
3638 vec
= XEXP (vec
, 0);
3642 vec
= XEXP (vec
, 1);
3644 vec
= avoid_constant_pool_reference (vec
);
3647 if (GET_MODE (vec
) == mode
)
3651 /* If we select elements in a vec_merge that all come from the same
3652 operand, select from that operand directly. */
3653 if (GET_CODE (op0
) == VEC_MERGE
)
3655 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3656 if (CONST_INT_P (trueop02
))
3658 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3659 bool all_operand0
= true;
3660 bool all_operand1
= true;
3661 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3663 rtx j
= XVECEXP (trueop1
, 0, i
);
3664 if (sel
& (1 << UINTVAL (j
)))
3665 all_operand1
= false;
3667 all_operand0
= false;
3669 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3670 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3671 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3672 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3679 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3680 ? GET_MODE (trueop0
)
3681 : GET_MODE_INNER (mode
));
3682 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3683 ? GET_MODE (trueop1
)
3684 : GET_MODE_INNER (mode
));
3686 gcc_assert (VECTOR_MODE_P (mode
));
3687 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3688 == GET_MODE_SIZE (mode
));
3690 if (VECTOR_MODE_P (op0_mode
))
3691 gcc_assert (GET_MODE_INNER (mode
)
3692 == GET_MODE_INNER (op0_mode
));
3694 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3696 if (VECTOR_MODE_P (op1_mode
))
3697 gcc_assert (GET_MODE_INNER (mode
)
3698 == GET_MODE_INNER (op1_mode
));
3700 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3702 if ((GET_CODE (trueop0
) == CONST_VECTOR
3703 || CONST_SCALAR_INT_P (trueop0
)
3704 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3705 && (GET_CODE (trueop1
) == CONST_VECTOR
3706 || CONST_SCALAR_INT_P (trueop1
)
3707 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3709 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3710 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3711 rtvec v
= rtvec_alloc (n_elts
);
3713 unsigned in_n_elts
= 1;
3715 if (VECTOR_MODE_P (op0_mode
))
3716 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3717 for (i
= 0; i
< n_elts
; i
++)
3721 if (!VECTOR_MODE_P (op0_mode
))
3722 RTVEC_ELT (v
, i
) = trueop0
;
3724 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3728 if (!VECTOR_MODE_P (op1_mode
))
3729 RTVEC_ELT (v
, i
) = trueop1
;
3731 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3736 return gen_rtx_CONST_VECTOR (mode
, v
);
3739 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3740 Restrict the transformation to avoid generating a VEC_SELECT with a
3741 mode unrelated to its operand. */
3742 if (GET_CODE (trueop0
) == VEC_SELECT
3743 && GET_CODE (trueop1
) == VEC_SELECT
3744 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3745 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3747 rtx par0
= XEXP (trueop0
, 1);
3748 rtx par1
= XEXP (trueop1
, 1);
3749 int len0
= XVECLEN (par0
, 0);
3750 int len1
= XVECLEN (par1
, 0);
3751 rtvec vec
= rtvec_alloc (len0
+ len1
);
3752 for (int i
= 0; i
< len0
; i
++)
3753 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3754 for (int i
= 0; i
< len1
; i
++)
3755 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3756 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3757 gen_rtx_PARALLEL (VOIDmode
, vec
));
3770 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3773 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3775 unsigned int width
= GET_MODE_PRECISION (mode
);
3777 if (VECTOR_MODE_P (mode
)
3778 && code
!= VEC_CONCAT
3779 && GET_CODE (op0
) == CONST_VECTOR
3780 && GET_CODE (op1
) == CONST_VECTOR
)
3782 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3783 enum machine_mode op0mode
= GET_MODE (op0
);
3784 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3785 enum machine_mode op1mode
= GET_MODE (op1
);
3786 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3787 rtvec v
= rtvec_alloc (n_elts
);
3790 gcc_assert (op0_n_elts
== n_elts
);
3791 gcc_assert (op1_n_elts
== n_elts
);
3792 for (i
= 0; i
< n_elts
; i
++)
3794 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3795 CONST_VECTOR_ELT (op0
, i
),
3796 CONST_VECTOR_ELT (op1
, i
));
3799 RTVEC_ELT (v
, i
) = x
;
3802 return gen_rtx_CONST_VECTOR (mode
, v
);
3805 if (VECTOR_MODE_P (mode
)
3806 && code
== VEC_CONCAT
3807 && (CONST_SCALAR_INT_P (op0
)
3808 || GET_CODE (op0
) == CONST_FIXED
3809 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3810 && (CONST_SCALAR_INT_P (op1
)
3811 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3812 || GET_CODE (op1
) == CONST_FIXED
))
3814 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3815 rtvec v
= rtvec_alloc (n_elts
);
3817 gcc_assert (n_elts
>= 2);
3820 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3821 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3823 RTVEC_ELT (v
, 0) = op0
;
3824 RTVEC_ELT (v
, 1) = op1
;
3828 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3829 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3832 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3833 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3834 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3836 for (i
= 0; i
< op0_n_elts
; ++i
)
3837 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3838 for (i
= 0; i
< op1_n_elts
; ++i
)
3839 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3842 return gen_rtx_CONST_VECTOR (mode
, v
);
3845 if (SCALAR_FLOAT_MODE_P (mode
)
3846 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3847 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3848 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3859 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3861 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3863 for (i
= 0; i
< 4; i
++)
3880 real_from_target (&r
, tmp0
, mode
);
3881 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3885 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3888 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3889 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3890 real_convert (&f0
, mode
, &f0
);
3891 real_convert (&f1
, mode
, &f1
);
3893 if (HONOR_SNANS (mode
)
3894 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3898 && REAL_VALUES_EQUAL (f1
, dconst0
)
3899 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3902 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3903 && flag_trapping_math
3904 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3906 int s0
= REAL_VALUE_NEGATIVE (f0
);
3907 int s1
= REAL_VALUE_NEGATIVE (f1
);
3912 /* Inf + -Inf = NaN plus exception. */
3917 /* Inf - Inf = NaN plus exception. */
3922 /* Inf / Inf = NaN plus exception. */
3929 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3930 && flag_trapping_math
3931 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3932 || (REAL_VALUE_ISINF (f1
)
3933 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3934 /* Inf * 0 = NaN plus exception. */
3937 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3939 real_convert (&result
, mode
, &value
);
3941 /* Don't constant fold this floating point operation if
3942 the result has overflowed and flag_trapping_math. */
3944 if (flag_trapping_math
3945 && MODE_HAS_INFINITIES (mode
)
3946 && REAL_VALUE_ISINF (result
)
3947 && !REAL_VALUE_ISINF (f0
)
3948 && !REAL_VALUE_ISINF (f1
))
3949 /* Overflow plus exception. */
3952 /* Don't constant fold this floating point operation if the
3953 result may dependent upon the run-time rounding mode and
3954 flag_rounding_math is set, or if GCC's software emulation
3955 is unable to accurately represent the result. */
3957 if ((flag_rounding_math
3958 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3959 && (inexact
|| !real_identical (&result
, &value
)))
3962 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3966 /* We can fold some multi-word operations. */
3967 if (GET_MODE_CLASS (mode
) == MODE_INT
3968 && width
== HOST_BITS_PER_DOUBLE_INT
3969 && (CONST_DOUBLE_AS_INT_P (op0
) || CONST_INT_P (op0
))
3970 && (CONST_DOUBLE_AS_INT_P (op1
) || CONST_INT_P (op1
)))
3972 double_int o0
, o1
, res
, tmp
;
3975 o0
= rtx_to_double_int (op0
);
3976 o1
= rtx_to_double_int (op1
);
3981 /* A - B == A + (-B). */
3984 /* Fall through.... */
3995 res
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
4002 tmp
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
4009 res
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
4016 tmp
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
4050 case LSHIFTRT
: case ASHIFTRT
:
4052 case ROTATE
: case ROTATERT
:
4054 unsigned HOST_WIDE_INT cnt
;
4056 if (SHIFT_COUNT_TRUNCATED
)
4059 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
4062 if (!o1
.fits_uhwi ()
4063 || o1
.to_uhwi () >= GET_MODE_PRECISION (mode
))
4066 cnt
= o1
.to_uhwi ();
4067 unsigned short prec
= GET_MODE_PRECISION (mode
);
4069 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
4070 res
= o0
.rshift (cnt
, prec
, code
== ASHIFTRT
);
4071 else if (code
== ASHIFT
)
4072 res
= o0
.alshift (cnt
, prec
);
4073 else if (code
== ROTATE
)
4074 res
= o0
.lrotate (cnt
, prec
);
4075 else /* code == ROTATERT */
4076 res
= o0
.rrotate (cnt
, prec
);
4084 return immed_double_int_const (res
, mode
);
4087 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
4088 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
4090 /* Get the integer argument values in two forms:
4091 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4093 arg0
= INTVAL (op0
);
4094 arg1
= INTVAL (op1
);
4096 if (width
< HOST_BITS_PER_WIDE_INT
)
4098 arg0
&= GET_MODE_MASK (mode
);
4099 arg1
&= GET_MODE_MASK (mode
);
4102 if (val_signbit_known_set_p (mode
, arg0s
))
4103 arg0s
|= ~GET_MODE_MASK (mode
);
4106 if (val_signbit_known_set_p (mode
, arg1s
))
4107 arg1s
|= ~GET_MODE_MASK (mode
);
4115 /* Compute the value of the arithmetic. */
4120 val
= (unsigned HOST_WIDE_INT
) arg0s
+ arg1s
;
4124 val
= (unsigned HOST_WIDE_INT
) arg0s
- arg1s
;
4128 val
= (unsigned HOST_WIDE_INT
) arg0s
* arg1s
;
4133 || ((unsigned HOST_WIDE_INT
) arg0s
4134 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4137 val
= arg0s
/ arg1s
;
4142 || ((unsigned HOST_WIDE_INT
) arg0s
4143 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4146 val
= arg0s
% arg1s
;
4151 || ((unsigned HOST_WIDE_INT
) arg0s
4152 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4155 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
4160 || ((unsigned HOST_WIDE_INT
) arg0s
4161 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4164 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
4182 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4183 the value is in range. We can't return any old value for
4184 out-of-range arguments because either the middle-end (via
4185 shift_truncation_mask) or the back-end might be relying on
4186 target-specific knowledge. Nor can we rely on
4187 shift_truncation_mask, since the shift might not be part of an
4188 ashlM3, lshrM3 or ashrM3 instruction. */
4189 if (SHIFT_COUNT_TRUNCATED
)
4190 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
4191 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
4194 val
= (code
== ASHIFT
4195 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
4196 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
4198 /* Sign-extend the result for arithmetic right shifts. */
4199 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
4200 val
|= HOST_WIDE_INT_M1U
<< (width
- arg1
);
	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	default:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
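
/* For example, (plus (plus (reg) (const_int 8)) (const_int -8)) is first
   flattened into the operand list { (reg), 8, -8 }; the two constants then
   fold to zero and the whole expression collapses back to just (reg).  */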
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  for (i = 0; i < n_ops; i++)
    {
      rtx this_op = ops[i].op;
      int this_neg = ops[i].neg;
      enum rtx_code this_code = GET_CODE (this_op);

      switch (this_code)
	{
	case PLUS:
	case MINUS:
	  ops[n_ops].op = XEXP (this_op, 1);
	  ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	  n_ops++;

	  ops[i].op = XEXP (this_op, 0);
	  input_ops++;
	  changed = 1;
	  canonicalized |= this_neg;
	  break;

	case NEG:
	  ops[i].op = XEXP (this_op, 0);
	  ops[i].neg = ! this_neg;
	  changed = 1;
	  canonicalized = 1;
	  break;

	case CONST:
	  if (GET_CODE (XEXP (this_op, 0)) == PLUS
	      && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
	      && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
	    {
	      ops[i].op = XEXP (XEXP (this_op, 0), 0);
	      ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
	      ops[n_ops].neg = this_neg;
	      n_ops++;
	      changed = 1;
	      canonicalized = 1;
	    }
	  break;

	case NOT:
	  /* ~a -> (-a - 1) */
	  ops[n_ops].op = CONSTM1_RTX (mode);
	  ops[n_ops++].neg = this_neg;
	  ops[i].op = XEXP (this_op, 0);
	  ops[i].neg = !this_neg;
	  changed = 1;
	  canonicalized = 1;
	  break;

	case CONST_INT:
	  n_constants++;
	  if (this_neg)
	    {
	      ops[i].op = neg_const_int (mode, this_op);
	      ops[i].neg = 0;
	      changed = 1;
	      canonicalized = 1;
	    }
	  break;

	default:
	  break;
	}
    }
  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */

  /* Insertion sort is good enough for an eight-element array.  */
  for (i = 1; i < n_ops; i++)
    {
      struct simplify_plus_minus_op_data save;
      j = i - 1;
      if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	continue;

      save = ops[i];
      do
	ops[j + 1] = ops[j];
      while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
      ops[j + 1] = save;
    }
  for (i = n_ops - 1; i > 0; i--)
    for (j = i - 1; j >= 0; j--)
      {
	rtx lhs = ops[j].op, rhs = ops[i].op;
	int lneg = ops[j].neg, rneg = ops[i].neg;

	if (lhs != 0 && rhs != 0)
	  {
	    enum rtx_code ncode = PLUS;

	    if (lneg != rneg)
	      {
		ncode = MINUS;
		if (lneg)
		  tem = lhs, lhs = rhs, rhs = tem;
	      }
	    else if (swap_commutative_operands_p (lhs, rhs))
	      tem = lhs, lhs = rhs, rhs = tem;

	    if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		&& (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
	      {
		rtx tem_lhs, tem_rhs;

		tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		tem = simplify_binary_operation (ncode, mode, tem_lhs,
						 tem_rhs);

		if (tem && !CONSTANT_P (tem))
		  tem = gen_rtx_CONST (GET_MODE (tem), tem);
	      }
	    else
	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);

	    /* Reject "simplifications" that just wrap the two
	       arguments in a CONST.  Failure to do so can result
	       in infinite recursion with simplify_binary_operation
	       when it calls us to simplify CONST operations.  */
	    if (tem
		&& ! (GET_CODE (tem) == CONST
		      && GET_CODE (XEXP (tem, 0)) == ncode
		      && XEXP (XEXP (tem, 0), 0) == lhs
		      && XEXP (XEXP (tem, 0), 1) == rhs))
	      {
		if (GET_CODE (tem) == NEG)
		  tem = XEXP (tem, 0), lneg = !lneg;
		if (CONST_INT_P (tem) && lneg)
		  tem = neg_const_int (mode, tem), lneg = 0;

		ops[i].op = tem;
		ops[i].neg = lneg;
		ops[j].op = NULL_RTX;
	      }
	  }
      }
  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      {
	ops[i] = ops[j];
	i++;
      }
  n_ops = i;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
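
/* For instance, (const (plus (symbol_ref "x") (const_int 4))) qualifies,
   while a bare (reg) or a (mult ...) does not.  */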
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
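
/* For example, (eq:SI (const_int 4) (const_int 4)) with VOIDmode operands
   is folded in "infinite precision" and yields const_true_rtx.  */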
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0)
      || CC0_P (op1))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));
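
  /* For example, (geu (plus a (const_int -4)) (const_int -4)) simplifies
     here to (ltu a (const_int 4)).  */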
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
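
  /* For example, (eq (plus x (const_int 3)) (const_int 7)) becomes
     (eq x (const_int 4)).  */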
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);
  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
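
/* For instance, comparison_result (LEU, CMP_LTU) yields const_true_rtx:
   operands known to be unsigned-less-than are certainly
   less-than-or-equal.  */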
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }
  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
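
  /* For example, (eq (reg:SI 100) (reg:SI 100)) folds to const_true_rtx
     here, since a register has no side effects.  */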
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (CONST_DOUBLE_AS_INT_P (trueop1))
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= GET_MODE_MASK (mode);
	  l1u &= GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l0s))
	    l0s |= ~GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l1s))
	    l1s |= ~GET_MODE_MASK (mode);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
	return comparison_result (code, CMP_EQ);

      {
	int cr;
	cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	return comparison_result (code, cr);
      }
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}
      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
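
  /* For example, (geu (reg:QI x) (const_int 0)) is a tautology and folds
     to const_true_rtx here.  */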
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
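
      /* For instance, (zero_extract:SI (const_int 0xab) (const_int 4)
	 (const_int 0)) folds to (const_int 0xb) when bits are numbered
	 from the least significant end.  */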
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
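
	  /* For instance, with sel = 1 in V2SImode, element 0 is taken
	     from operand 0 and element 1 from operand 1.  */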
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;
	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
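
/* For example, (subreg:QI (const_int 0x1234) 0) goes through the routine
   above and folds to (const_int 0x34) on a little-endian target.  */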
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to OP's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
5955 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5957 /* In nonparadoxical subregs we can't handle negative offsets. */
5958 if (final_offset
< 0)
5960 /* Bail out in case resulting subreg would be incorrect. */
5961 if (final_offset
% GET_MODE_SIZE (outermode
)
5962 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5968 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
5970 /* In paradoxical subreg, see if we are still looking on lower part.
5971 If so, our SUBREG_BYTE will be 0. */
5972 if (WORDS_BIG_ENDIAN
)
5973 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5974 if (BYTES_BIG_ENDIAN
)
5975 offset
+= difference
% UNITS_PER_WORD
;
5976 if (offset
== final_offset
)
5982 /* Recurse for further possible simplifications. */
5983 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5987 if (validate_subreg (outermode
, innermostmode
,
5988 SUBREG_REG (op
), final_offset
))
5990 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5991 if (SUBREG_PROMOTED_VAR_P (op
)
5992 && SUBREG_PROMOTED_UNSIGNED_P (op
) >= 0
5993 && GET_MODE_CLASS (outermode
) == MODE_INT
5994 && IN_RANGE (GET_MODE_SIZE (outermode
),
5995 GET_MODE_SIZE (innermode
),
5996 GET_MODE_SIZE (innermostmode
))
5997 && subreg_lowpart_p (newx
))
5999 SUBREG_PROMOTED_VAR_P (newx
) = 1;
6000 SUBREG_PROMOTED_UNSIGNED_SET
6001 (newx
, SUBREG_PROMOTED_UNSIGNED_P (op
));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
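
  /* For example, on a little-endian target (subreg:SI (zero_extend:DI
     (reg:SI x)) 4) selects only the zero-extended upper half, so it folds
     to (const_int 0).  */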
  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL_RTX;
}