1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
36 #include "diagnostic-core.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Much code operates on (low, high) pairs; the low value is an
43 unsigned wide int, the high value a signed wide int. We
44 occasionally need to sign extend from low to high as if low were a
46 #define HWI_SIGN_EXTEND(low) \
47 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
49 static rtx
neg_const_int (enum machine_mode
, const_rtx
);
50 static bool plus_minus_operand_p (const_rtx
);
51 static bool simplify_plus_minus_op_data_cmp (rtx
, rtx
);
52 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
53 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
55 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
57 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
58 enum machine_mode
, rtx
, rtx
);
59 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
60 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
63 /* Negate a CONST_INT rtx, truncating (because a conversion from a
64 maximally negative number can overflow). */
66 neg_const_int (enum machine_mode mode
, const_rtx i
)
68 return gen_int_mode (-(unsigned HOST_WIDE_INT
) INTVAL (i
), mode
);
71 /* Test whether expression, X, is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
75 mode_signbit_p (enum machine_mode mode
, const_rtx x
)
77 unsigned HOST_WIDE_INT val
;
80 if (GET_MODE_CLASS (mode
) != MODE_INT
)
83 width
= GET_MODE_PRECISION (mode
);
87 if (width
<= HOST_BITS_PER_WIDE_INT
90 else if (width
<= HOST_BITS_PER_DOUBLE_INT
91 && CONST_DOUBLE_AS_INT_P (x
)
92 && CONST_DOUBLE_LOW (x
) == 0)
94 val
= CONST_DOUBLE_HIGH (x
);
95 width
-= HOST_BITS_PER_WIDE_INT
;
98 /* FIXME: We don't yet have a representation for wider modes. */
101 if (width
< HOST_BITS_PER_WIDE_INT
)
102 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
103 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
106 /* Test whether VAL is equal to the most significant bit of mode MODE
107 (after masking with the mode mask of MODE). Returns false if the
108 precision of MODE is too large to handle. */
111 val_signbit_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
115 if (GET_MODE_CLASS (mode
) != MODE_INT
)
118 width
= GET_MODE_PRECISION (mode
);
119 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
122 val
&= GET_MODE_MASK (mode
);
123 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
126 /* Test whether the most significant bit of mode MODE is set in VAL.
127 Returns false if the precision of MODE is too large to handle. */
129 val_signbit_known_set_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
133 if (GET_MODE_CLASS (mode
) != MODE_INT
)
136 width
= GET_MODE_PRECISION (mode
);
137 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
140 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
144 /* Test whether the most significant bit of mode MODE is clear in VAL.
145 Returns false if the precision of MODE is too large to handle. */
147 val_signbit_known_clear_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
151 if (GET_MODE_CLASS (mode
) != MODE_INT
)
154 width
= GET_MODE_PRECISION (mode
);
155 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
158 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
162 /* Make a binary operation by properly ordering the operands and
163 seeing if the expression folds. */
166 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
171 /* If this simplifies, do it. */
172 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
176 /* Put complex operands first and constants second if commutative. */
177 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
178 && swap_commutative_operands_p (op0
, op1
))
179 tem
= op0
, op0
= op1
, op1
= tem
;
181 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
184 /* If X is a MEM referencing the constant pool, return the real value.
185 Otherwise return X. */
187 avoid_constant_pool_reference (rtx x
)
190 enum machine_mode cmode
;
191 HOST_WIDE_INT offset
= 0;
193 switch (GET_CODE (x
))
199 /* Handle float extensions of constant pool references. */
201 c
= avoid_constant_pool_reference (tmp
);
202 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
206 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
207 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
215 if (GET_MODE (x
) == BLKmode
)
220 /* Call target hook to avoid the effects of -fpic etc.... */
221 addr
= targetm
.delegitimize_address (addr
);
223 /* Split the address into a base and integer offset. */
224 if (GET_CODE (addr
) == CONST
225 && GET_CODE (XEXP (addr
, 0)) == PLUS
226 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))
228 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
229 addr
= XEXP (XEXP (addr
, 0), 0);
232 if (GET_CODE (addr
) == LO_SUM
)
233 addr
= XEXP (addr
, 1);
235 /* If this is a constant pool reference, we can turn it into its
236 constant and hope that simplifications happen. */
237 if (GET_CODE (addr
) == SYMBOL_REF
238 && CONSTANT_POOL_ADDRESS_P (addr
))
240 c
= get_pool_constant (addr
);
241 cmode
= get_pool_mode (addr
);
243 /* If we're accessing the constant in a different mode than it was
244 originally stored, attempt to fix that up via subreg simplifications.
245 If that fails we have no choice but to return the original memory. */
246 if ((offset
!= 0 || cmode
!= GET_MODE (x
))
247 && offset
>= 0 && offset
< GET_MODE_SIZE (cmode
))
249 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
250 if (tem
&& CONSTANT_P (tem
))
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
265 delegitimize_mem_from_attrs (rtx x
)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
271 && MEM_OFFSET_KNOWN_P (x
))
273 tree decl
= MEM_EXPR (x
);
274 enum machine_mode mode
= GET_MODE (x
);
275 HOST_WIDE_INT offset
= 0;
277 switch (TREE_CODE (decl
))
287 case ARRAY_RANGE_REF
:
292 case VIEW_CONVERT_EXPR
:
294 HOST_WIDE_INT bitsize
, bitpos
;
296 int unsignedp
, volatilep
= 0;
298 decl
= get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
,
299 &mode
, &unsignedp
, &volatilep
, false);
300 if (bitsize
!= GET_MODE_BITSIZE (mode
)
301 || (bitpos
% BITS_PER_UNIT
)
302 || (toffset
&& !tree_fits_shwi_p (toffset
)))
306 offset
+= bitpos
/ BITS_PER_UNIT
;
308 offset
+= tree_to_shwi (toffset
);
315 && mode
== GET_MODE (x
)
316 && TREE_CODE (decl
) == VAR_DECL
317 && (TREE_STATIC (decl
)
318 || DECL_THREAD_LOCAL_P (decl
))
319 && DECL_RTL_SET_P (decl
)
320 && MEM_P (DECL_RTL (decl
)))
324 offset
+= MEM_OFFSET (x
);
326 newx
= DECL_RTL (decl
);
330 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
339 || (GET_CODE (o
) == PLUS
340 && GET_CODE (XEXP (o
, 1)) == CONST_INT
341 && (offset
== INTVAL (XEXP (o
, 1))
342 || (GET_CODE (n
) == PLUS
343 && GET_CODE (XEXP (n
, 1)) == CONST_INT
344 && (INTVAL (XEXP (n
, 1)) + offset
345 == INTVAL (XEXP (o
, 1)))
346 && (n
= XEXP (n
, 0))))
347 && (o
= XEXP (o
, 0))))
348 && rtx_equal_p (o
, n
)))
349 x
= adjust_address_nv (newx
, mode
, offset
);
351 else if (GET_MODE (x
) == GET_MODE (newx
)
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
364 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
365 enum machine_mode op_mode
)
369 /* If this simplifies, use it. */
370 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
373 return gen_rtx_fmt_e (code
, mode
, op
);
376 /* Likewise for ternary operations. */
379 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
380 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
384 /* If this simplifies, use it. */
385 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
389 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
396 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
397 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
401 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
405 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
414 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
415 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
417 enum rtx_code code
= GET_CODE (x
);
418 enum machine_mode mode
= GET_MODE (x
);
419 enum machine_mode op_mode
;
421 rtx op0
, op1
, op2
, newx
, op
;
425 if (__builtin_expect (fn
!= NULL
, 0))
427 newx
= fn (x
, old_rtx
, data
);
431 else if (rtx_equal_p (x
, old_rtx
))
432 return copy_rtx ((rtx
) data
);
434 switch (GET_RTX_CLASS (code
))
438 op_mode
= GET_MODE (op0
);
439 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
440 if (op0
== XEXP (x
, 0))
442 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
446 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
447 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
448 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
450 return simplify_gen_binary (code
, mode
, op0
, op1
);
453 case RTX_COMM_COMPARE
:
456 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
457 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
458 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
459 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
461 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
464 case RTX_BITFIELD_OPS
:
466 op_mode
= GET_MODE (op0
);
467 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
468 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
469 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
470 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
472 if (op_mode
== VOIDmode
)
473 op_mode
= GET_MODE (op0
);
474 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
479 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
480 if (op0
== SUBREG_REG (x
))
482 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
483 GET_MODE (SUBREG_REG (x
)),
485 return op0
? op0
: x
;
492 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
493 if (op0
== XEXP (x
, 0))
495 return replace_equiv_address_nv (x
, op0
);
497 else if (code
== LO_SUM
)
499 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
500 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
506 /* (lo_sum (high x) (const (plus x ofs))) -> (const (plus x ofs)) */
507 if (GET_CODE (op0
) == HIGH
&& GET_CODE (op1
) == CONST
508 && GET_CODE(XEXP (op1
, 0)) == PLUS
509 && rtx_equal_p (XEXP (XEXP (op1
, 0), 0), XEXP (op0
, 0)))
512 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
514 return gen_rtx_LO_SUM (mode
, op0
, op1
);
523 fmt
= GET_RTX_FORMAT (code
);
524 for (i
= 0; fmt
[i
]; i
++)
529 newvec
= XVEC (newx
, i
);
530 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
532 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
534 if (op
!= RTVEC_ELT (vec
, j
))
538 newvec
= shallow_copy_rtvec (vec
);
540 newx
= shallow_copy_rtx (x
);
541 XVEC (newx
, i
) = newvec
;
543 RTVEC_ELT (newvec
, j
) = op
;
551 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
552 if (op
!= XEXP (x
, i
))
555 newx
= shallow_copy_rtx (x
);
564 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
565 resulting RTX. Return a new RTX which is as simplified as possible. */
568 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
570 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
573 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
574 Only handle cases where the truncated value is inherently an rvalue.
576 RTL provides two ways of truncating a value:
578 1. a lowpart subreg. This form is only a truncation when both
579 the outer and inner modes (here MODE and OP_MODE respectively)
580 are scalar integers, and only then when the subreg is used as
583 It is only valid to form such truncating subregs if the
584 truncation requires no action by the target. The onus for
585 proving this is on the creator of the subreg -- e.g. the
586 caller to simplify_subreg or simplify_gen_subreg -- and typically
587 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
589 2. a TRUNCATE. This form handles both scalar and compound integers.
591 The first form is preferred where valid. However, the TRUNCATE
592 handling in simplify_unary_operation turns the second form into the
593 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
594 so it is generally safe to form rvalue truncations using:
596 simplify_gen_unary (TRUNCATE, ...)
598 and leave simplify_unary_operation to work out which representation
601 Because of the proof requirements on (1), simplify_truncation must
602 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
603 regardless of whether the outer truncation came from a SUBREG or a
604 TRUNCATE. For example, if the caller has proven that an SImode
609 is a no-op and can be represented as a subreg, it does not follow
610 that SImode truncations of X and Y are also no-ops. On a target
611 like 64-bit MIPS that requires SImode values to be stored in
612 sign-extended form, an SImode truncation of:
614 (and:DI (reg:DI X) (const_int 63))
616 is trivially a no-op because only the lower 6 bits can be set.
617 However, X is still an arbitrary 64-bit number and so we cannot
618 assume that truncating it too is a no-op. */
621 simplify_truncation (enum machine_mode mode
, rtx op
,
622 enum machine_mode op_mode
)
624 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
625 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
626 gcc_assert (precision
<= op_precision
);
628 /* Optimize truncations of zero and sign extended values. */
629 if (GET_CODE (op
) == ZERO_EXTEND
630 || GET_CODE (op
) == SIGN_EXTEND
)
632 /* There are three possibilities. If MODE is the same as the
633 origmode, we can omit both the extension and the subreg.
634 If MODE is not larger than the origmode, we can apply the
635 truncation without the extension. Finally, if the outermode
636 is larger than the origmode, we can just extend to the appropriate
638 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
639 if (mode
== origmode
)
641 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
642 return simplify_gen_unary (TRUNCATE
, mode
,
643 XEXP (op
, 0), origmode
);
645 return simplify_gen_unary (GET_CODE (op
), mode
,
646 XEXP (op
, 0), origmode
);
649 /* If the machine can perform operations in the truncated mode, distribute
650 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
651 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
653 #ifdef WORD_REGISTER_OPERATIONS
654 && precision
>= BITS_PER_WORD
656 && (GET_CODE (op
) == PLUS
657 || GET_CODE (op
) == MINUS
658 || GET_CODE (op
) == MULT
))
660 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
663 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
665 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
669 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
670 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
671 the outer subreg is effectively a truncation to the original mode. */
672 if ((GET_CODE (op
) == LSHIFTRT
673 || GET_CODE (op
) == ASHIFTRT
)
674 /* Ensure that OP_MODE is at least twice as wide as MODE
675 to avoid the possibility that an outer LSHIFTRT shifts by more
676 than the sign extension's sign_bit_copies and introduces zeros
677 into the high bits of the result. */
678 && 2 * precision
<= op_precision
679 && CONST_INT_P (XEXP (op
, 1))
680 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
681 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
682 && UINTVAL (XEXP (op
, 1)) < precision
)
683 return simplify_gen_binary (ASHIFTRT
, mode
,
684 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
686 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
687 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
688 the outer subreg is effectively a truncation to the original mode. */
689 if ((GET_CODE (op
) == LSHIFTRT
690 || GET_CODE (op
) == ASHIFTRT
)
691 && CONST_INT_P (XEXP (op
, 1))
692 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
693 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
694 && UINTVAL (XEXP (op
, 1)) < precision
)
695 return simplify_gen_binary (LSHIFTRT
, mode
,
696 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
698 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
699 to (ashift:QI (x:QI) C), where C is a suitable small constant and
700 the outer subreg is effectively a truncation to the original mode. */
701 if (GET_CODE (op
) == ASHIFT
702 && CONST_INT_P (XEXP (op
, 1))
703 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
704 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
705 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
706 && UINTVAL (XEXP (op
, 1)) < precision
)
707 return simplify_gen_binary (ASHIFT
, mode
,
708 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
710 /* Recognize a word extraction from a multi-word subreg. */
711 if ((GET_CODE (op
) == LSHIFTRT
712 || GET_CODE (op
) == ASHIFTRT
)
713 && SCALAR_INT_MODE_P (mode
)
714 && SCALAR_INT_MODE_P (op_mode
)
715 && precision
>= BITS_PER_WORD
716 && 2 * precision
<= op_precision
717 && CONST_INT_P (XEXP (op
, 1))
718 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
719 && UINTVAL (XEXP (op
, 1)) < op_precision
)
721 int byte
= subreg_lowpart_offset (mode
, op_mode
);
722 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
723 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
725 ? byte
- shifted_bytes
726 : byte
+ shifted_bytes
));
729 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
730 and try replacing the TRUNCATE and shift with it. Don't do this
731 if the MEM has a mode-dependent address. */
732 if ((GET_CODE (op
) == LSHIFTRT
733 || GET_CODE (op
) == ASHIFTRT
)
734 && SCALAR_INT_MODE_P (op_mode
)
735 && MEM_P (XEXP (op
, 0))
736 && CONST_INT_P (XEXP (op
, 1))
737 && (INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (mode
)) == 0
738 && INTVAL (XEXP (op
, 1)) > 0
739 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (op_mode
)
740 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
741 MEM_ADDR_SPACE (XEXP (op
, 0)))
742 && ! MEM_VOLATILE_P (XEXP (op
, 0))
743 && (GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
744 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
746 int byte
= subreg_lowpart_offset (mode
, op_mode
);
747 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
748 return adjust_address_nv (XEXP (op
, 0), mode
,
750 ? byte
- shifted_bytes
751 : byte
+ shifted_bytes
));
754 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
755 (OP:SI foo:SI) if OP is NEG or ABS. */
756 if ((GET_CODE (op
) == ABS
757 || GET_CODE (op
) == NEG
)
758 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
759 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
760 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
761 return simplify_gen_unary (GET_CODE (op
), mode
,
762 XEXP (XEXP (op
, 0), 0), mode
);
764 /* (truncate:A (subreg:B (truncate:C X) 0)) is
766 if (GET_CODE (op
) == SUBREG
767 && SCALAR_INT_MODE_P (mode
)
768 && SCALAR_INT_MODE_P (op_mode
)
769 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op
)))
770 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
771 && subreg_lowpart_p (op
))
773 rtx inner
= XEXP (SUBREG_REG (op
), 0);
774 if (GET_MODE_PRECISION (mode
)
775 <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
776 return simplify_gen_unary (TRUNCATE
, mode
, inner
, GET_MODE (inner
));
778 /* If subreg above is paradoxical and C is narrower
779 than A, return (subreg:A (truncate:C X) 0). */
780 return simplify_gen_subreg (mode
, SUBREG_REG (op
),
781 GET_MODE (SUBREG_REG (op
)), 0);
784 /* (truncate:A (truncate:B X)) is (truncate:A X). */
785 if (GET_CODE (op
) == TRUNCATE
)
786 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
787 GET_MODE (XEXP (op
, 0)));
792 /* Try to simplify a unary operation CODE whose output mode is to be
793 MODE with input operand OP whose mode was originally OP_MODE.
794 Return zero if no simplification can be made. */
796 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
797 rtx op
, enum machine_mode op_mode
)
801 trueop
= avoid_constant_pool_reference (op
);
803 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
807 return simplify_unary_operation_1 (code
, mode
, op
);
810 /* Perform some simplifications we can do even if the operands
813 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
815 enum rtx_code reversed
;
821 /* (not (not X)) == X. */
822 if (GET_CODE (op
) == NOT
)
825 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
826 comparison is all ones. */
827 if (COMPARISON_P (op
)
828 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
829 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
830 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
831 XEXP (op
, 0), XEXP (op
, 1));
833 /* (not (plus X -1)) can become (neg X). */
834 if (GET_CODE (op
) == PLUS
835 && XEXP (op
, 1) == constm1_rtx
)
836 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
838 /* Similarly, (not (neg X)) is (plus X -1). */
839 if (GET_CODE (op
) == NEG
)
840 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
843 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
844 if (GET_CODE (op
) == XOR
845 && CONST_INT_P (XEXP (op
, 1))
846 && (temp
= simplify_unary_operation (NOT
, mode
,
847 XEXP (op
, 1), mode
)) != 0)
848 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
850 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
851 if (GET_CODE (op
) == PLUS
852 && CONST_INT_P (XEXP (op
, 1))
853 && mode_signbit_p (mode
, XEXP (op
, 1))
854 && (temp
= simplify_unary_operation (NOT
, mode
,
855 XEXP (op
, 1), mode
)) != 0)
856 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
859 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
860 operands other than 1, but that is not valid. We could do a
861 similar simplification for (not (lshiftrt C X)) where C is
862 just the sign bit, but this doesn't seem common enough to
864 if (GET_CODE (op
) == ASHIFT
865 && XEXP (op
, 0) == const1_rtx
)
867 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
868 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
871 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
872 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
873 so we can perform the above simplification. */
874 if (STORE_FLAG_VALUE
== -1
875 && GET_CODE (op
) == ASHIFTRT
876 && GET_CODE (XEXP (op
, 1))
877 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
878 return simplify_gen_relational (GE
, mode
, VOIDmode
,
879 XEXP (op
, 0), const0_rtx
);
882 if (GET_CODE (op
) == SUBREG
883 && subreg_lowpart_p (op
)
884 && (GET_MODE_SIZE (GET_MODE (op
))
885 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
886 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
887 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
889 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
892 x
= gen_rtx_ROTATE (inner_mode
,
893 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
895 XEXP (SUBREG_REG (op
), 1));
896 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
901 /* Apply De Morgan's laws to reduce number of patterns for machines
902 with negating logical insns (and-not, nand, etc.). If result has
903 only one NOT, put it first, since that is how the patterns are
905 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
907 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
908 enum machine_mode op_mode
;
910 op_mode
= GET_MODE (in1
);
911 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
913 op_mode
= GET_MODE (in2
);
914 if (op_mode
== VOIDmode
)
916 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
918 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
921 in2
= in1
; in1
= tem
;
924 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
928 /* (not (bswap x)) -> (bswap (not x)). */
929 if (GET_CODE (op
) == BSWAP
)
931 rtx x
= simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
932 return simplify_gen_unary (BSWAP
, mode
, x
, mode
);
937 /* (neg (neg X)) == X. */
938 if (GET_CODE (op
) == NEG
)
941 /* (neg (plus X 1)) can become (not X). */
942 if (GET_CODE (op
) == PLUS
943 && XEXP (op
, 1) == const1_rtx
)
944 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
946 /* Similarly, (neg (not X)) is (plus X 1). */
947 if (GET_CODE (op
) == NOT
)
948 return simplify_gen_binary (PLUS
, mode
, XEXP (op
, 0),
951 /* (neg (minus X Y)) can become (minus Y X). This transformation
952 isn't safe for modes with signed zeros, since if X and Y are
953 both +0, (minus Y X) is the same as (minus X Y). If the
954 rounding mode is towards +infinity (or -infinity) then the two
955 expressions will be rounded differently. */
956 if (GET_CODE (op
) == MINUS
957 && !HONOR_SIGNED_ZEROS (mode
)
958 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
959 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
961 if (GET_CODE (op
) == PLUS
962 && !HONOR_SIGNED_ZEROS (mode
)
963 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
965 /* (neg (plus A C)) is simplified to (minus -C A). */
966 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
967 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
969 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
971 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
974 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
975 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
976 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
979 /* (neg (mult A B)) becomes (mult A (neg B)).
980 This works even for floating-point values. */
981 if (GET_CODE (op
) == MULT
982 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
984 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
985 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
988 /* NEG commutes with ASHIFT since it is multiplication. Only do
989 this if we can then eliminate the NEG (e.g., if the operand
991 if (GET_CODE (op
) == ASHIFT
)
993 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
995 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
998 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
999 C is equal to the width of MODE minus 1. */
1000 if (GET_CODE (op
) == ASHIFTRT
1001 && CONST_INT_P (XEXP (op
, 1))
1002 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1003 return simplify_gen_binary (LSHIFTRT
, mode
,
1004 XEXP (op
, 0), XEXP (op
, 1));
1006 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1007 C is equal to the width of MODE minus 1. */
1008 if (GET_CODE (op
) == LSHIFTRT
1009 && CONST_INT_P (XEXP (op
, 1))
1010 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
1011 return simplify_gen_binary (ASHIFTRT
, mode
,
1012 XEXP (op
, 0), XEXP (op
, 1));
1014 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1015 if (GET_CODE (op
) == XOR
1016 && XEXP (op
, 1) == const1_rtx
1017 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
1018 return plus_constant (mode
, XEXP (op
, 0), -1);
1020 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1021 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1022 if (GET_CODE (op
) == LT
1023 && XEXP (op
, 1) == const0_rtx
1024 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
1026 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
1027 int isize
= GET_MODE_PRECISION (inner
);
1028 if (STORE_FLAG_VALUE
== 1)
1030 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1031 GEN_INT (isize
- 1));
1034 if (GET_MODE_PRECISION (mode
) > isize
)
1035 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1036 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1038 else if (STORE_FLAG_VALUE
== -1)
1040 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1041 GEN_INT (isize
- 1));
1044 if (GET_MODE_PRECISION (mode
) > isize
)
1045 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1046 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1052 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1053 with the umulXi3_highpart patterns. */
1054 if (GET_CODE (op
) == LSHIFTRT
1055 && GET_CODE (XEXP (op
, 0)) == MULT
)
1058 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1060 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1062 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1066 /* We can't handle truncation to a partial integer mode here
1067 because we don't know the real bitsize of the partial
1072 if (GET_MODE (op
) != VOIDmode
)
1074 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1079 /* If we know that the value is already truncated, we can
1080 replace the TRUNCATE with a SUBREG. */
1081 if (GET_MODE_NUNITS (mode
) == 1
1082 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1083 || truncated_to_mode (mode
, op
)))
1085 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1090 /* A truncate of a comparison can be replaced with a subreg if
1091 STORE_FLAG_VALUE permits. This is like the previous test,
1092 but it works even if the comparison is done in a mode larger
1093 than HOST_BITS_PER_WIDE_INT. */
1094 if (HWI_COMPUTABLE_MODE_P (mode
)
1095 && COMPARISON_P (op
)
1096 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1098 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1103 /* A truncate of a memory is just loading the low part of the memory
1104 if we are not changing the meaning of the address. */
1105 if (GET_CODE (op
) == MEM
1106 && !VECTOR_MODE_P (mode
)
1107 && !MEM_VOLATILE_P (op
)
1108 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1110 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1117 case FLOAT_TRUNCATE
:
1118 if (DECIMAL_FLOAT_MODE_P (mode
))
1121 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1122 if (GET_CODE (op
) == FLOAT_EXTEND
1123 && GET_MODE (XEXP (op
, 0)) == mode
)
1124 return XEXP (op
, 0);
1126 /* (float_truncate:SF (float_truncate:DF foo:XF))
1127 = (float_truncate:SF foo:XF).
1128 This may eliminate double rounding, so it is unsafe.
1130 (float_truncate:SF (float_extend:XF foo:DF))
1131 = (float_truncate:SF foo:DF).
1133 (float_truncate:DF (float_extend:XF foo:SF))
1134 = (float_extend:SF foo:DF). */
1135 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1136 && flag_unsafe_math_optimizations
)
1137 || GET_CODE (op
) == FLOAT_EXTEND
)
1138 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1140 > GET_MODE_SIZE (mode
)
1141 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1143 XEXP (op
, 0), mode
);
1145 /* (float_truncate (float x)) is (float x) */
1146 if (GET_CODE (op
) == FLOAT
1147 && (flag_unsafe_math_optimizations
1148 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1149 && ((unsigned)significand_size (GET_MODE (op
))
1150 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1151 - num_sign_bit_copies (XEXP (op
, 0),
1152 GET_MODE (XEXP (op
, 0))))))))
1153 return simplify_gen_unary (FLOAT
, mode
,
1155 GET_MODE (XEXP (op
, 0)));
1157 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1158 (OP:SF foo:SF) if OP is NEG or ABS. */
1159 if ((GET_CODE (op
) == ABS
1160 || GET_CODE (op
) == NEG
)
1161 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1162 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1163 return simplify_gen_unary (GET_CODE (op
), mode
,
1164 XEXP (XEXP (op
, 0), 0), mode
);
1166 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1167 is (float_truncate:SF x). */
1168 if (GET_CODE (op
) == SUBREG
1169 && subreg_lowpart_p (op
)
1170 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1171 return SUBREG_REG (op
);
1175 if (DECIMAL_FLOAT_MODE_P (mode
))
1178 /* (float_extend (float_extend x)) is (float_extend x)
1180 (float_extend (float x)) is (float x) assuming that double
1181 rounding can't happen.
1183 if (GET_CODE (op
) == FLOAT_EXTEND
1184 || (GET_CODE (op
) == FLOAT
1185 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1186 && ((unsigned)significand_size (GET_MODE (op
))
1187 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1188 - num_sign_bit_copies (XEXP (op
, 0),
1189 GET_MODE (XEXP (op
, 0)))))))
1190 return simplify_gen_unary (GET_CODE (op
), mode
,
1192 GET_MODE (XEXP (op
, 0)));
1197 /* (abs (neg <foo>)) -> (abs <foo>) */
1198 if (GET_CODE (op
) == NEG
)
1199 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1200 GET_MODE (XEXP (op
, 0)));
1202 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1204 if (GET_MODE (op
) == VOIDmode
)
1207 /* If operand is something known to be positive, ignore the ABS. */
1208 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1209 || val_signbit_known_clear_p (GET_MODE (op
),
1210 nonzero_bits (op
, GET_MODE (op
))))
1213 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1214 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1215 return gen_rtx_NEG (mode
, op
);
1220 /* (ffs (*_extend <X>)) = (ffs <X>) */
1221 if (GET_CODE (op
) == SIGN_EXTEND
1222 || GET_CODE (op
) == ZERO_EXTEND
)
1223 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1224 GET_MODE (XEXP (op
, 0)));
1228 switch (GET_CODE (op
))
1232 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1233 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1234 GET_MODE (XEXP (op
, 0)));
1238 /* Rotations don't affect popcount. */
1239 if (!side_effects_p (XEXP (op
, 1)))
1240 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1241 GET_MODE (XEXP (op
, 0)));
1250 switch (GET_CODE (op
))
1256 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1257 GET_MODE (XEXP (op
, 0)));
1261 /* Rotations don't affect parity. */
1262 if (!side_effects_p (XEXP (op
, 1)))
1263 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1264 GET_MODE (XEXP (op
, 0)));
1273 /* (bswap (bswap x)) -> x. */
1274 if (GET_CODE (op
) == BSWAP
)
1275 return XEXP (op
, 0);
1279 /* (float (sign_extend <X>)) = (float <X>). */
1280 if (GET_CODE (op
) == SIGN_EXTEND
)
1281 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1282 GET_MODE (XEXP (op
, 0)));
1286 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1287 becomes just the MINUS if its mode is MODE. This allows
1288 folding switch statements on machines using casesi (such as
1290 if (GET_CODE (op
) == TRUNCATE
1291 && GET_MODE (XEXP (op
, 0)) == mode
1292 && GET_CODE (XEXP (op
, 0)) == MINUS
1293 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1294 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1295 return XEXP (op
, 0);
1297 /* Extending a widening multiplication should be canonicalized to
1298 a wider widening multiplication. */
1299 if (GET_CODE (op
) == MULT
)
1301 rtx lhs
= XEXP (op
, 0);
1302 rtx rhs
= XEXP (op
, 1);
1303 enum rtx_code lcode
= GET_CODE (lhs
);
1304 enum rtx_code rcode
= GET_CODE (rhs
);
1306 /* Widening multiplies usually extend both operands, but sometimes
1307 they use a shift to extract a portion of a register. */
1308 if ((lcode
== SIGN_EXTEND
1309 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1310 && (rcode
== SIGN_EXTEND
1311 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1313 enum machine_mode lmode
= GET_MODE (lhs
);
1314 enum machine_mode rmode
= GET_MODE (rhs
);
1317 if (lcode
== ASHIFTRT
)
1318 /* Number of bits not shifted off the end. */
1319 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1320 else /* lcode == SIGN_EXTEND */
1321 /* Size of inner mode. */
1322 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1324 if (rcode
== ASHIFTRT
)
1325 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1326 else /* rcode == SIGN_EXTEND */
1327 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1329 /* We can only widen multiplies if the result is mathematiclly
1330 equivalent. I.e. if overflow was impossible. */
1331 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1332 return simplify_gen_binary
1334 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1335 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1339 /* Check for a sign extension of a subreg of a promoted
1340 variable, where the promotion is sign-extended, and the
1341 target mode is the same as the variable's promotion. */
1342 if (GET_CODE (op
) == SUBREG
1343 && SUBREG_PROMOTED_VAR_P (op
)
1344 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1345 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1347 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1352 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1353 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1354 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1356 gcc_assert (GET_MODE_BITSIZE (mode
)
1357 > GET_MODE_BITSIZE (GET_MODE (op
)));
1358 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1359 GET_MODE (XEXP (op
, 0)));
1362 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1363 is (sign_extend:M (subreg:O <X>)) if there is mode with
1364 GET_MODE_BITSIZE (N) - I bits.
1365 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1366 is similarly (zero_extend:M (subreg:O <X>)). */
1367 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1368 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1369 && CONST_INT_P (XEXP (op
, 1))
1370 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1371 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1373 enum machine_mode tmode
1374 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1375 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1376 gcc_assert (GET_MODE_BITSIZE (mode
)
1377 > GET_MODE_BITSIZE (GET_MODE (op
)));
1378 if (tmode
!= BLKmode
)
1381 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1383 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1384 ? SIGN_EXTEND
: ZERO_EXTEND
,
1385 mode
, inner
, tmode
);
1389 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1390 /* As we do not know which address space the pointer is referring to,
1391 we can do this only if the target does not support different pointer
1392 or address modes depending on the address space. */
1393 if (target_default_pointer_address_modes_p ()
1394 && ! POINTERS_EXTEND_UNSIGNED
1395 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1397 || (GET_CODE (op
) == SUBREG
1398 && REG_P (SUBREG_REG (op
))
1399 && REG_POINTER (SUBREG_REG (op
))
1400 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1401 return convert_memory_address (Pmode
, op
);
1406 /* Check for a zero extension of a subreg of a promoted
1407 variable, where the promotion is zero-extended, and the
1408 target mode is the same as the variable's promotion. */
1409 if (GET_CODE (op
) == SUBREG
1410 && SUBREG_PROMOTED_VAR_P (op
)
1411 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
1412 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1414 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1419 /* Extending a widening multiplication should be canonicalized to
1420 a wider widening multiplication. */
1421 if (GET_CODE (op
) == MULT
)
1423 rtx lhs
= XEXP (op
, 0);
1424 rtx rhs
= XEXP (op
, 1);
1425 enum rtx_code lcode
= GET_CODE (lhs
);
1426 enum rtx_code rcode
= GET_CODE (rhs
);
1428 /* Widening multiplies usually extend both operands, but sometimes
1429 they use a shift to extract a portion of a register. */
1430 if ((lcode
== ZERO_EXTEND
1431 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1432 && (rcode
== ZERO_EXTEND
1433 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1435 enum machine_mode lmode
= GET_MODE (lhs
);
1436 enum machine_mode rmode
= GET_MODE (rhs
);
1439 if (lcode
== LSHIFTRT
)
1440 /* Number of bits not shifted off the end. */
1441 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1442 else /* lcode == ZERO_EXTEND */
1443 /* Size of inner mode. */
1444 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1446 if (rcode
== LSHIFTRT
)
1447 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1448 else /* rcode == ZERO_EXTEND */
1449 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1451 /* We can only widen multiplies if the result is mathematiclly
1452 equivalent. I.e. if overflow was impossible. */
1453 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1454 return simplify_gen_binary
1456 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1457 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1461 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1462 if (GET_CODE (op
) == ZERO_EXTEND
)
1463 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1464 GET_MODE (XEXP (op
, 0)));
1466 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1467 is (zero_extend:M (subreg:O <X>)) if there is mode with
1468 GET_MODE_BITSIZE (N) - I bits. */
1469 if (GET_CODE (op
) == LSHIFTRT
1470 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1471 && CONST_INT_P (XEXP (op
, 1))
1472 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1473 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1475 enum machine_mode tmode
1476 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1477 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1478 if (tmode
!= BLKmode
)
1481 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1483 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1487 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1488 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1490 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1491 (and:SI (reg:SI) (const_int 63)). */
1492 if (GET_CODE (op
) == SUBREG
1493 && GET_MODE_PRECISION (GET_MODE (op
))
1494 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1495 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1496 <= HOST_BITS_PER_WIDE_INT
1497 && GET_MODE_PRECISION (mode
)
1498 >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
)))
1499 && subreg_lowpart_p (op
)
1500 && (nonzero_bits (SUBREG_REG (op
), GET_MODE (SUBREG_REG (op
)))
1501 & ~GET_MODE_MASK (GET_MODE (op
))) == 0)
1503 if (GET_MODE_PRECISION (mode
)
1504 == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op
))))
1505 return SUBREG_REG (op
);
1506 return simplify_gen_unary (ZERO_EXTEND
, mode
, SUBREG_REG (op
),
1507 GET_MODE (SUBREG_REG (op
)));
1510 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1511 /* As we do not know which address space the pointer is referring to,
1512 we can do this only if the target does not support different pointer
1513 or address modes depending on the address space. */
1514 if (target_default_pointer_address_modes_p ()
1515 && POINTERS_EXTEND_UNSIGNED
> 0
1516 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1518 || (GET_CODE (op
) == SUBREG
1519 && REG_P (SUBREG_REG (op
))
1520 && REG_POINTER (SUBREG_REG (op
))
1521 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1522 return convert_memory_address (Pmode
, op
);
1533 /* Try to compute the value of a unary operation CODE whose output mode is to
1534 be MODE with input operand OP whose mode was originally OP_MODE.
1535 Return zero if the value cannot be computed. */
1537 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
1538 rtx op
, enum machine_mode op_mode
)
1540 unsigned int width
= GET_MODE_PRECISION (mode
);
1541 unsigned int op_width
= GET_MODE_PRECISION (op_mode
);
1543 if (code
== VEC_DUPLICATE
)
1545 gcc_assert (VECTOR_MODE_P (mode
));
1546 if (GET_MODE (op
) != VOIDmode
)
1548 if (!VECTOR_MODE_P (GET_MODE (op
)))
1549 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1551 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1554 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1555 || GET_CODE (op
) == CONST_VECTOR
)
1557 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1558 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1559 rtvec v
= rtvec_alloc (n_elts
);
1562 if (GET_CODE (op
) != CONST_VECTOR
)
1563 for (i
= 0; i
< n_elts
; i
++)
1564 RTVEC_ELT (v
, i
) = op
;
1567 enum machine_mode inmode
= GET_MODE (op
);
1568 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1569 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1571 gcc_assert (in_n_elts
< n_elts
);
1572 gcc_assert ((n_elts
% in_n_elts
) == 0);
1573 for (i
= 0; i
< n_elts
; i
++)
1574 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1576 return gen_rtx_CONST_VECTOR (mode
, v
);
1580 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1582 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1583 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1584 enum machine_mode opmode
= GET_MODE (op
);
1585 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1586 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1587 rtvec v
= rtvec_alloc (n_elts
);
1590 gcc_assert (op_n_elts
== n_elts
);
1591 for (i
= 0; i
< n_elts
; i
++)
1593 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1594 CONST_VECTOR_ELT (op
, i
),
1595 GET_MODE_INNER (opmode
));
1598 RTVEC_ELT (v
, i
) = x
;
1600 return gen_rtx_CONST_VECTOR (mode
, v
);
1603 /* The order of these tests is critical so that, for example, we don't
1604 check the wrong mode (input vs. output) for a conversion operation,
1605 such as FIX. At some point, this should be simplified. */
1607 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1609 HOST_WIDE_INT hv
, lv
;
1612 if (CONST_INT_P (op
))
1613 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1615 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1617 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1618 d
= real_value_truncate (mode
, d
);
1619 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1621 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1623 HOST_WIDE_INT hv
, lv
;
1626 if (CONST_INT_P (op
))
1627 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1629 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1631 if (op_mode
== VOIDmode
1632 || GET_MODE_PRECISION (op_mode
) > HOST_BITS_PER_DOUBLE_INT
)
1633 /* We should never get a negative number. */
1634 gcc_assert (hv
>= 0);
1635 else if (GET_MODE_PRECISION (op_mode
) <= HOST_BITS_PER_WIDE_INT
)
1636 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1638 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1639 d
= real_value_truncate (mode
, d
);
1640 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1643 if (CONST_INT_P (op
)
1644 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1646 HOST_WIDE_INT arg0
= INTVAL (op
);
1656 val
= - (unsigned HOST_WIDE_INT
) arg0
;
1660 val
= (arg0
>= 0 ? arg0
: - arg0
);
1664 arg0
&= GET_MODE_MASK (mode
);
1665 val
= ffs_hwi (arg0
);
1669 arg0
&= GET_MODE_MASK (mode
);
1670 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1673 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 1;
1677 arg0
&= GET_MODE_MASK (mode
);
1679 val
= GET_MODE_PRECISION (mode
) - 1;
1681 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 2;
1683 val
= GET_MODE_PRECISION (mode
) - floor_log2 (~arg0
) - 2;
1687 arg0
&= GET_MODE_MASK (mode
);
1690 /* Even if the value at zero is undefined, we have to come
1691 up with some replacement. Seems good enough. */
1692 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1693 val
= GET_MODE_PRECISION (mode
);
1696 val
= ctz_hwi (arg0
);
1700 arg0
&= GET_MODE_MASK (mode
);
1703 val
++, arg0
&= arg0
- 1;
1707 arg0
&= GET_MODE_MASK (mode
);
1710 val
++, arg0
&= arg0
- 1;
1719 for (s
= 0; s
< width
; s
+= 8)
1721 unsigned int d
= width
- s
- 8;
1722 unsigned HOST_WIDE_INT byte
;
1723 byte
= (arg0
>> s
) & 0xff;
1734 /* When zero-extending a CONST_INT, we need to know its
1736 gcc_assert (op_mode
!= VOIDmode
);
1737 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1739 /* If we were really extending the mode,
1740 we would have to distinguish between zero-extension
1741 and sign-extension. */
1742 gcc_assert (width
== op_width
);
1745 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1746 val
= arg0
& GET_MODE_MASK (op_mode
);
1752 if (op_mode
== VOIDmode
)
1754 op_width
= GET_MODE_PRECISION (op_mode
);
1755 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1757 /* If we were really extending the mode,
1758 we would have to distinguish between zero-extension
1759 and sign-extension. */
1760 gcc_assert (width
== op_width
);
1763 else if (op_width
< HOST_BITS_PER_WIDE_INT
)
1765 val
= arg0
& GET_MODE_MASK (op_mode
);
1766 if (val_signbit_known_set_p (op_mode
, val
))
1767 val
|= ~GET_MODE_MASK (op_mode
);
1775 case FLOAT_TRUNCATE
:
1787 return gen_int_mode (val
, mode
);
1790 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1791 for a DImode operation on a CONST_INT. */
1792 else if (width
<= HOST_BITS_PER_DOUBLE_INT
1793 && (CONST_DOUBLE_AS_INT_P (op
) || CONST_INT_P (op
)))
1795 double_int first
, value
;
1797 if (CONST_DOUBLE_AS_INT_P (op
))
1798 first
= double_int::from_pair (CONST_DOUBLE_HIGH (op
),
1799 CONST_DOUBLE_LOW (op
));
1801 first
= double_int::from_shwi (INTVAL (op
));
1814 if (first
.is_negative ())
1823 value
.low
= ffs_hwi (first
.low
);
1824 else if (first
.high
!= 0)
1825 value
.low
= HOST_BITS_PER_WIDE_INT
+ ffs_hwi (first
.high
);
1832 if (first
.high
!= 0)
1833 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.high
) - 1
1834 - HOST_BITS_PER_WIDE_INT
;
1835 else if (first
.low
!= 0)
1836 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.low
) - 1;
1837 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1838 value
.low
= GET_MODE_PRECISION (mode
);
1844 value
.low
= ctz_hwi (first
.low
);
1845 else if (first
.high
!= 0)
1846 value
.low
= HOST_BITS_PER_WIDE_INT
+ ctz_hwi (first
.high
);
1847 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1848 value
.low
= GET_MODE_PRECISION (mode
);
1852 value
= double_int_zero
;
1856 first
.low
&= first
.low
- 1;
1861 first
.high
&= first
.high
- 1;
1866 value
= double_int_zero
;
1870 first
.low
&= first
.low
- 1;
1875 first
.high
&= first
.high
- 1;
1884 value
= double_int_zero
;
1885 for (s
= 0; s
< width
; s
+= 8)
1887 unsigned int d
= width
- s
- 8;
1888 unsigned HOST_WIDE_INT byte
;
1890 if (s
< HOST_BITS_PER_WIDE_INT
)
1891 byte
= (first
.low
>> s
) & 0xff;
1893 byte
= (first
.high
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1895 if (d
< HOST_BITS_PER_WIDE_INT
)
1896 value
.low
|= byte
<< d
;
1898 value
.high
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1904 /* This is just a change-of-mode, so do nothing. */
1909 gcc_assert (op_mode
!= VOIDmode
);
1911 if (op_width
> HOST_BITS_PER_WIDE_INT
)
1914 value
= double_int::from_uhwi (first
.low
& GET_MODE_MASK (op_mode
));
1918 if (op_mode
== VOIDmode
1919 || op_width
> HOST_BITS_PER_WIDE_INT
)
1923 value
.low
= first
.low
& GET_MODE_MASK (op_mode
);
1924 if (val_signbit_known_set_p (op_mode
, value
.low
))
1925 value
.low
|= ~GET_MODE_MASK (op_mode
);
1927 value
.high
= HWI_SIGN_EXTEND (value
.low
);
1938 return immed_double_int_const (value
, mode
);
1941 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1942 && SCALAR_FLOAT_MODE_P (mode
)
1943 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1946 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1953 d
= real_value_abs (&d
);
1956 d
= real_value_negate (&d
);
1958 case FLOAT_TRUNCATE
:
1959 d
= real_value_truncate (mode
, d
);
1962 /* All this does is change the mode, unless changing
1964 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1965 real_convert (&d
, mode
, &d
);
1968 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1975 real_to_target (tmp
, &d
, GET_MODE (op
));
1976 for (i
= 0; i
< 4; i
++)
1978 real_from_target (&d
, tmp
, mode
);
1984 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1987 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1988 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1989 && GET_MODE_CLASS (mode
) == MODE_INT
1990 && width
<= HOST_BITS_PER_DOUBLE_INT
&& width
> 0)
1992 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1993 operators are intentionally left unspecified (to ease implementation
1994 by target backends), for consistency, this routine implements the
1995 same semantics for constant folding as used by the middle-end. */
1997 /* This was formerly used only for non-IEEE float.
1998 eggert@twinsun.com says it is safe for IEEE also. */
1999 HOST_WIDE_INT xh
, xl
, th
, tl
;
2000 REAL_VALUE_TYPE x
, t
;
2001 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
2005 if (REAL_VALUE_ISNAN (x
))
2008 /* Test against the signed upper bound. */
2009 if (width
> HOST_BITS_PER_WIDE_INT
)
2011 th
= ((unsigned HOST_WIDE_INT
) 1
2012 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
2018 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
2020 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
2021 if (REAL_VALUES_LESS (t
, x
))
2028 /* Test against the signed lower bound. */
2029 if (width
> HOST_BITS_PER_WIDE_INT
)
2031 th
= HOST_WIDE_INT_M1U
<< (width
- HOST_BITS_PER_WIDE_INT
- 1);
2037 tl
= HOST_WIDE_INT_M1U
<< (width
- 1);
2039 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
2040 if (REAL_VALUES_LESS (x
, t
))
2046 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2050 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
2053 /* Test against the unsigned upper bound. */
2054 if (width
== HOST_BITS_PER_DOUBLE_INT
)
2059 else if (width
>= HOST_BITS_PER_WIDE_INT
)
2061 th
= ((unsigned HOST_WIDE_INT
) 1
2062 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
2068 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
2070 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
2071 if (REAL_VALUES_LESS (t
, x
))
2078 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2084 return immed_double_const (xl
, xh
, mode
);
2090 /* Subroutine of simplify_binary_operation to simplify a binary operation
2091 CODE that can commute with byte swapping, with result mode MODE and
2092 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2093 Return zero if no simplification or canonicalization is possible. */
2096 simplify_byte_swapping_operation (enum rtx_code code
, enum machine_mode mode
,
2101 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2102 if (GET_CODE (op0
) == BSWAP
&& CONST_SCALAR_INT_P (op1
))
2104 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0),
2105 simplify_gen_unary (BSWAP
, mode
, op1
, mode
));
2106 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
2109 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2110 if (GET_CODE (op0
) == BSWAP
&& GET_CODE (op1
) == BSWAP
)
2112 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2113 return simplify_gen_unary (BSWAP
, mode
, tem
, mode
);
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
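
/* For illustration: with PLUS in SImode, (plus (plus x (const_int 1))
   (const_int 2)) should reach the "(a op b) op c" -> "a op (b op c)"
   attempt below, where the two constants fold to 3, giving
   (plus x (const_int 3)).  */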
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
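      /* Concretely: under the default round-to-nearest mode,
	 (-0.0) + 0.0 evaluates to +0.0, so dropping the addition would
	 change the sign of a zero result; hence the HONOR_SIGNED_ZEROS
	 check below.  */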
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
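
      /* Worked example: in QImode with a = 5, ~a is 0xfa (250), and
	 250 + 1 = 251 = 0xfb, which is exactly the two's complement
	 representation of -5, i.e. (neg a).  */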
      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */
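
      /* For example, (plus (mult x (const_int 3)) x) reduces both operands
	 to the same underlying x with coefficients 3 and 1, so it can be
	 rebuilt as (mult x (const_int 4)), provided that form is judged no
	 more expensive than the original.  */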
2271 if (SCALAR_INT_MODE_P (mode
))
2273 double_int coeff0
, coeff1
;
2274 rtx lhs
= op0
, rhs
= op1
;
2276 coeff0
= double_int_one
;
2277 coeff1
= double_int_one
;
2279 if (GET_CODE (lhs
) == NEG
)
2281 coeff0
= double_int_minus_one
;
2282 lhs
= XEXP (lhs
, 0);
2284 else if (GET_CODE (lhs
) == MULT
2285 && CONST_INT_P (XEXP (lhs
, 1)))
2287 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2288 lhs
= XEXP (lhs
, 0);
2290 else if (GET_CODE (lhs
) == ASHIFT
2291 && CONST_INT_P (XEXP (lhs
, 1))
2292 && INTVAL (XEXP (lhs
, 1)) >= 0
2293 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2295 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2296 lhs
= XEXP (lhs
, 0);
2299 if (GET_CODE (rhs
) == NEG
)
2301 coeff1
= double_int_minus_one
;
2302 rhs
= XEXP (rhs
, 0);
2304 else if (GET_CODE (rhs
) == MULT
2305 && CONST_INT_P (XEXP (rhs
, 1)))
2307 coeff1
= double_int::from_shwi (INTVAL (XEXP (rhs
, 1)));
2308 rhs
= XEXP (rhs
, 0);
2310 else if (GET_CODE (rhs
) == ASHIFT
2311 && CONST_INT_P (XEXP (rhs
, 1))
2312 && INTVAL (XEXP (rhs
, 1)) >= 0
2313 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2315 coeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2316 rhs
= XEXP (rhs
, 0);
2319 if (rtx_equal_p (lhs
, rhs
))
2321 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2324 bool speed
= optimize_function_for_speed_p (cfun
);
2326 val
= coeff0
+ coeff1
;
2327 coeff
= immed_double_int_const (val
, mode
);
2329 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2330 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
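
      /* Worked example: in QImode the sign bit is 0x80, and adding 0x80
	 modulo 256 flips only the top bit, exactly what XOR with 0x80 does;
	 so ((x ^ 0x12) + 0x80) equals (x ^ (0x12 ^ 0x80)) = (x ^ 0x92)
	 for every 8-bit x.  */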
2344 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2345 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2346 && GET_CODE (op0
) == MULT
2347 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2351 in1
= XEXP (XEXP (op0
, 0), 0);
2352 in2
= XEXP (op0
, 1);
2353 return simplify_gen_binary (MINUS
, mode
, op1
,
2354 simplify_gen_binary (MULT
, mode
,
2358 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2359 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2361 if (COMPARISON_P (op0
)
2362 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2363 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2364 && (reversed
= reversed_comparison (op0
, mode
)))
2366 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2368 /* If one of the operands is a PLUS or a MINUS, see if we can
2369 simplify this by the associative law.
2370 Don't use the associative law for floating point.
2371 The inaccuracy makes it nonassociative,
2372 and subtle programs can break if operations are associated. */
2374 if (INTEGRAL_MODE_P (mode
)
2375 && (plus_minus_operand_p (op0
)
2376 || plus_minus_operand_p (op1
))
2377 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2380 /* Reassociate floating point addition only when the user
2381 specifies associative math operations. */
2382 if (FLOAT_MODE_P (mode
)
2383 && flag_associative_math
)
2385 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2392 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2393 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2394 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2395 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2397 rtx xop00
= XEXP (op0
, 0);
2398 rtx xop10
= XEXP (op1
, 0);
2401 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2403 if (REG_P (xop00
) && REG_P (xop10
)
2404 && GET_MODE (xop00
) == GET_MODE (xop10
)
2405 && REGNO (xop00
) == REGNO (xop10
)
2406 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2407 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);
      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);
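
      /* Worked example: in QImode with a = 5, -1 - 5 = -6 has the 8-bit
	 pattern 0xfa, and ~5 is also 0xfa; in general a + ~a = -1, so
	 (minus (const_int -1) a) and (not a) always agree.  */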
2433 /* Subtracting 0 has no effect unless the mode has signed zeros
2434 and supports rounding towards -infinity. In such a case,
2436 if (!(HONOR_SIGNED_ZEROS (mode
)
2437 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2438 && trueop1
== CONST0_RTX (mode
))
2441 /* See if this is something like X * C - X or vice versa or
2442 if the multiplication is written as a shift. If so, we can
2443 distribute and make a new multiply, shift, or maybe just
2444 have X (if C is 2 in the example above). But don't make
2445 something more expensive than we had before. */
2447 if (SCALAR_INT_MODE_P (mode
))
2449 double_int coeff0
, negcoeff1
;
2450 rtx lhs
= op0
, rhs
= op1
;
2452 coeff0
= double_int_one
;
2453 negcoeff1
= double_int_minus_one
;
2455 if (GET_CODE (lhs
) == NEG
)
2457 coeff0
= double_int_minus_one
;
2458 lhs
= XEXP (lhs
, 0);
2460 else if (GET_CODE (lhs
) == MULT
2461 && CONST_INT_P (XEXP (lhs
, 1)))
2463 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2464 lhs
= XEXP (lhs
, 0);
2466 else if (GET_CODE (lhs
) == ASHIFT
2467 && CONST_INT_P (XEXP (lhs
, 1))
2468 && INTVAL (XEXP (lhs
, 1)) >= 0
2469 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2471 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2472 lhs
= XEXP (lhs
, 0);
2475 if (GET_CODE (rhs
) == NEG
)
2477 negcoeff1
= double_int_one
;
2478 rhs
= XEXP (rhs
, 0);
2480 else if (GET_CODE (rhs
) == MULT
2481 && CONST_INT_P (XEXP (rhs
, 1)))
2483 negcoeff1
= double_int::from_shwi (-INTVAL (XEXP (rhs
, 1)));
2484 rhs
= XEXP (rhs
, 0);
2486 else if (GET_CODE (rhs
) == ASHIFT
2487 && CONST_INT_P (XEXP (rhs
, 1))
2488 && INTVAL (XEXP (rhs
, 1)) >= 0
2489 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2491 negcoeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2492 negcoeff1
= -negcoeff1
;
2493 rhs
= XEXP (rhs
, 0);
2496 if (rtx_equal_p (lhs
, rhs
))
2498 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2501 bool speed
= optimize_function_for_speed_p (cfun
);
2503 val
= coeff0
+ negcoeff1
;
2504 coeff
= immed_double_int_const (val
, mode
);
2506 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2507 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2512 /* (a - (-b)) -> (a + b). True even for IEEE. */
2513 if (GET_CODE (op1
) == NEG
)
2514 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2516 /* (-x - c) may be simplified as (-c - x). */
2517 if (GET_CODE (op0
) == NEG
2518 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2520 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2522 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2525 /* Don't let a relocatable value get a negative coeff. */
2526 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2527 return simplify_gen_binary (PLUS
, mode
,
2529 neg_const_int (mode
, op1
));
      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
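
      /* Worked example: with x = 0b1100 and y = 0b1010, x & y = 0b1000 and
	 x - (x & y) = 0b0100, which matches x & ~y; subtracting the common
	 bits is the same as masking them off, because (x & y) is always a
	 submask of x.  */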
2548 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2549 by reversing the comparison code if valid. */
2550 if (STORE_FLAG_VALUE
== 1
2551 && trueop0
== const1_rtx
2552 && COMPARISON_P (op1
)
2553 && (reversed
= reversed_comparison (op1
, mode
)))
2556 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2557 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2558 && GET_CODE (op1
) == MULT
2559 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2563 in1
= XEXP (XEXP (op1
, 0), 0);
2564 in2
= XEXP (op1
, 1);
2565 return simplify_gen_binary (PLUS
, mode
,
2566 simplify_gen_binary (MULT
, mode
,
2571 /* Canonicalize (minus (neg A) (mult B C)) to
2572 (minus (mult (neg B) C) A). */
2573 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2574 && GET_CODE (op1
) == MULT
2575 && GET_CODE (op0
) == NEG
)
2579 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2580 in2
= XEXP (op1
, 1);
2581 return simplify_gen_binary (MINUS
, mode
,
2582 simplify_gen_binary (MULT
, mode
,
2587 /* If one of the operands is a PLUS or a MINUS, see if we can
2588 simplify this by the associative law. This will, for example,
2589 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2590 Don't use the associative law for floating point.
2591 The inaccuracy makes it nonassociative,
2592 and subtle programs can break if operations are associated. */
2594 if (INTEGRAL_MODE_P (mode
)
2595 && (plus_minus_operand_p (op0
)
2596 || plus_minus_operand_p (op1
))
2597 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2602 if (trueop1
== constm1_rtx
)
2603 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2605 if (GET_CODE (op0
) == NEG
)
2607 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2608 /* If op1 is a MULT as well and simplify_unary_operation
2609 just moved the NEG to the second operand, simplify_gen_binary
2610 below could through simplify_associative_operation move
2611 the NEG around again and recurse endlessly. */
2613 && GET_CODE (op1
) == MULT
2614 && GET_CODE (temp
) == MULT
2615 && XEXP (op1
, 0) == XEXP (temp
, 0)
2616 && GET_CODE (XEXP (temp
, 1)) == NEG
2617 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2620 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2622 if (GET_CODE (op1
) == NEG
)
2624 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2625 /* If op0 is a MULT as well and simplify_unary_operation
2626 just moved the NEG to the second operand, simplify_gen_binary
2627 below could through simplify_associative_operation move
2628 the NEG around again and recurse endlessly. */
2630 && GET_CODE (op0
) == MULT
2631 && GET_CODE (temp
) == MULT
2632 && XEXP (op0
, 0) == XEXP (temp
, 0)
2633 && GET_CODE (XEXP (temp
, 1)) == NEG
2634 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2637 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2640 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2641 x is NaN, since x * 0 is then also NaN. Nor is it valid
2642 when the mode has signed zeros, since multiplying a negative
2643 number by 0 will give -0, not 0. */
2644 if (!HONOR_NANS (mode
)
2645 && !HONOR_SIGNED_ZEROS (mode
)
2646 && trueop1
== CONST0_RTX (mode
)
2647 && ! side_effects_p (op0
))
2650 /* In IEEE floating point, x*1 is not equivalent to x for
2652 if (!HONOR_SNANS (mode
)
2653 && trueop1
== CONST1_RTX (mode
))
2656 /* Convert multiply by constant power of two into shift unless
2657 we are still generating RTL. This test is a kludge. */
2658 if (CONST_INT_P (trueop1
)
2659 && (val
= exact_log2 (UINTVAL (trueop1
))) >= 0
2660 /* If the mode is larger than the host word size, and the
2661 uppermost bit is set, then this isn't a power of two due
2662 to implicit sign extension. */
2663 && (width
<= HOST_BITS_PER_WIDE_INT
2664 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
2665 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2667 /* Likewise for multipliers wider than a word. */
2668 if (CONST_DOUBLE_AS_INT_P (trueop1
)
2669 && GET_MODE (op0
) == mode
2670 && CONST_DOUBLE_LOW (trueop1
) == 0
2671 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0
2672 && (val
< HOST_BITS_PER_DOUBLE_INT
- 1
2673 || GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_DOUBLE_INT
))
2674 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2675 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2677 /* x*2 is x+x and x*(-1) is -x */
2678 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2679 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2680 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2681 && GET_MODE (op0
) == mode
)
2684 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2686 if (REAL_VALUES_EQUAL (d
, dconst2
))
2687 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2689 if (!HONOR_SNANS (mode
)
2690 && REAL_VALUES_EQUAL (d
, dconstm1
))
2691 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2694 /* Optimize -x * -x as x * x. */
2695 if (FLOAT_MODE_P (mode
)
2696 && GET_CODE (op0
) == NEG
2697 && GET_CODE (op1
) == NEG
2698 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2699 && !side_effects_p (XEXP (op0
, 0)))
2700 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2702 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2703 if (SCALAR_FLOAT_MODE_P (mode
)
2704 && GET_CODE (op0
) == ABS
2705 && GET_CODE (op1
) == ABS
2706 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2707 && !side_effects_p (XEXP (op0
, 0)))
2708 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2710 /* Reassociate multiplication, but for floating point MULTs
2711 only when the user specifies unsafe math optimizations. */
2712 if (! FLOAT_MODE_P (mode
)
2713 || flag_unsafe_math_optimizations
)
2715 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2722 if (trueop1
== CONST0_RTX (mode
))
2724 if (INTEGRAL_MODE_P (mode
)
2725 && trueop1
== CONSTM1_RTX (mode
)
2726 && !side_effects_p (op0
))
2728 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2730 /* A | (~A) -> -1 */
2731 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2732 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2733 && ! side_effects_p (op0
)
2734 && SCALAR_INT_MODE_P (mode
))
2737 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2738 if (CONST_INT_P (op1
)
2739 && HWI_COMPUTABLE_MODE_P (mode
)
2740 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2741 && !side_effects_p (op0
))
2744 /* Canonicalize (X & C1) | C2. */
2745 if (GET_CODE (op0
) == AND
2746 && CONST_INT_P (trueop1
)
2747 && CONST_INT_P (XEXP (op0
, 1)))
2749 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2750 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2751 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2753 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2755 && !side_effects_p (XEXP (op0
, 0)))
2758 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2759 if (((c1
|c2
) & mask
) == mask
)
2760 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2762 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2763 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2765 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2766 gen_int_mode (c1
& ~c2
, mode
));
2767 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2771 /* Convert (A & B) | A to A. */
2772 if (GET_CODE (op0
) == AND
2773 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2774 || rtx_equal_p (XEXP (op0
, 1), op1
))
2775 && ! side_effects_p (XEXP (op0
, 0))
2776 && ! side_effects_p (XEXP (op0
, 1)))
2779 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2780 mode size to (rotate A CX). */
2782 if (GET_CODE (op1
) == ASHIFT
2783 || GET_CODE (op1
) == SUBREG
)
2794 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2795 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2796 && CONST_INT_P (XEXP (opleft
, 1))
2797 && CONST_INT_P (XEXP (opright
, 1))
2798 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2799 == GET_MODE_PRECISION (mode
)))
2800 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2802 /* Same, but for ashift that has been "simplified" to a wider mode
2803 by simplify_shift_const. */
2805 if (GET_CODE (opleft
) == SUBREG
2806 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2807 && GET_CODE (opright
) == LSHIFTRT
2808 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2809 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2810 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2811 && (GET_MODE_SIZE (GET_MODE (opleft
))
2812 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2813 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2814 SUBREG_REG (XEXP (opright
, 0)))
2815 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2816 && CONST_INT_P (XEXP (opright
, 1))
2817 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2818 == GET_MODE_PRECISION (mode
)))
2819 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2820 XEXP (SUBREG_REG (opleft
), 1));
2822 /* If we have (ior (and (X C1) C2)), simplify this by making
2823 C1 as small as possible if C1 actually changes. */
2824 if (CONST_INT_P (op1
)
2825 && (HWI_COMPUTABLE_MODE_P (mode
)
2826 || INTVAL (op1
) > 0)
2827 && GET_CODE (op0
) == AND
2828 && CONST_INT_P (XEXP (op0
, 1))
2829 && CONST_INT_P (op1
)
2830 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2832 rtx tmp
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2833 gen_int_mode (UINTVAL (XEXP (op0
, 1))
2836 return simplify_gen_binary (IOR
, mode
, tmp
, op1
);
2839 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2840 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2841 the PLUS does not affect any of the bits in OP1: then we can do
2842 the IOR as a PLUS and we can associate. This is valid if OP1
2843 can be safely shifted left C bits. */
2844 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2845 && GET_CODE (XEXP (op0
, 0)) == PLUS
2846 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2847 && CONST_INT_P (XEXP (op0
, 1))
2848 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2850 int count
= INTVAL (XEXP (op0
, 1));
2851 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2853 if (mask
>> count
== INTVAL (trueop1
)
2854 && trunc_int_for_mode (mask
, mode
) == mask
2855 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2856 return simplify_gen_binary (ASHIFTRT
, mode
,
2857 plus_constant (mode
, XEXP (op0
, 0),
2862 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
2866 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2872 if (trueop1
== CONST0_RTX (mode
))
2874 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2875 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2876 if (rtx_equal_p (trueop0
, trueop1
)
2877 && ! side_effects_p (op0
)
2878 && GET_MODE_CLASS (mode
) != MODE_CC
)
2879 return CONST0_RTX (mode
);
      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
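      /* Worked example: in QImode, (xor x (const_int 0x80)) and
	 (plus x (const_int 0x80)) give the same 8-bit result for every x,
	 since adding the sign bit modulo 256 only toggles the top bit;
	 the PLUS form is chosen as canonical here.  */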
2885 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2886 if (CONST_SCALAR_INT_P (op1
)
2887 && GET_CODE (op0
) == PLUS
2888 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2889 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2890 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2891 simplify_gen_binary (XOR
, mode
, op1
,
2894 /* If we are XORing two things that have no bits in common,
2895 convert them into an IOR. This helps to detect rotation encoded
2896 using those methods and possibly other simplifications. */
2898 if (HWI_COMPUTABLE_MODE_P (mode
)
2899 && (nonzero_bits (op0
, mode
)
2900 & nonzero_bits (op1
, mode
)) == 0)
2901 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2903 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2904 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2907 int num_negated
= 0;
2909 if (GET_CODE (op0
) == NOT
)
2910 num_negated
++, op0
= XEXP (op0
, 0);
2911 if (GET_CODE (op1
) == NOT
)
2912 num_negated
++, op1
= XEXP (op1
, 0);
2914 if (num_negated
== 2)
2915 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2916 else if (num_negated
== 1)
2917 return simplify_gen_unary (NOT
, mode
,
2918 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2922 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2923 correspond to a machine insn or result in further simplifications
2924 if B is a constant. */
2926 if (GET_CODE (op0
) == AND
2927 && rtx_equal_p (XEXP (op0
, 1), op1
)
2928 && ! side_effects_p (op1
))
2929 return simplify_gen_binary (AND
, mode
,
2930 simplify_gen_unary (NOT
, mode
,
2931 XEXP (op0
, 0), mode
),
2934 else if (GET_CODE (op0
) == AND
2935 && rtx_equal_p (XEXP (op0
, 0), op1
)
2936 && ! side_effects_p (op1
))
2937 return simplify_gen_binary (AND
, mode
,
2938 simplify_gen_unary (NOT
, mode
,
2939 XEXP (op0
, 1), mode
),
2942 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2943 we can transform like this:
2944 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2945 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2946 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2947 Attempt a few simplifications when B and C are both constants. */
2948 if (GET_CODE (op0
) == AND
2949 && CONST_INT_P (op1
)
2950 && CONST_INT_P (XEXP (op0
, 1)))
2952 rtx a
= XEXP (op0
, 0);
2953 rtx b
= XEXP (op0
, 1);
2955 HOST_WIDE_INT bval
= INTVAL (b
);
2956 HOST_WIDE_INT cval
= INTVAL (c
);
2959 = simplify_binary_operation (AND
, mode
,
2960 simplify_gen_unary (NOT
, mode
, a
, mode
),
2962 if ((~cval
& bval
) == 0)
2964 /* Try to simplify ~A&C | ~B&C. */
2965 if (na_c
!= NULL_RTX
)
2966 return simplify_gen_binary (IOR
, mode
, na_c
,
2967 gen_int_mode (~bval
& cval
, mode
));
2971 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2972 if (na_c
== const0_rtx
)
2974 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2975 gen_int_mode (~cval
& bval
,
2977 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2978 gen_int_mode (~bval
& cval
,
2984 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2985 comparison if STORE_FLAG_VALUE is 1. */
2986 if (STORE_FLAG_VALUE
== 1
2987 && trueop1
== const1_rtx
2988 && COMPARISON_P (op0
)
2989 && (reversed
= reversed_comparison (op0
, mode
)))
2992 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2993 is (lt foo (const_int 0)), so we can perform the above
2994 simplification if STORE_FLAG_VALUE is 1. */
2996 if (STORE_FLAG_VALUE
== 1
2997 && trueop1
== const1_rtx
2998 && GET_CODE (op0
) == LSHIFTRT
2999 && CONST_INT_P (XEXP (op0
, 1))
3000 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
3001 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
3003 /* (xor (comparison foo bar) (const_int sign-bit))
3004 when STORE_FLAG_VALUE is the sign bit. */
3005 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
3006 && trueop1
== const_true_rtx
3007 && COMPARISON_P (op0
)
3008 && (reversed
= reversed_comparison (op0
, mode
)))
3011 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3015 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3021 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3023 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
3025 if (HWI_COMPUTABLE_MODE_P (mode
))
3027 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
3028 HOST_WIDE_INT nzop1
;
3029 if (CONST_INT_P (trueop1
))
3031 HOST_WIDE_INT val1
= INTVAL (trueop1
);
3032 /* If we are turning off bits already known off in OP0, we need
3034 if ((nzop0
& ~val1
) == 0)
3037 nzop1
= nonzero_bits (trueop1
, mode
);
3038 /* If we are clearing all the nonzero bits, the result is zero. */
3039 if ((nzop1
& nzop0
) == 0
3040 && !side_effects_p (op0
) && !side_effects_p (op1
))
3041 return CONST0_RTX (mode
);
3043 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
3044 && GET_MODE_CLASS (mode
) != MODE_CC
)
3047 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
3048 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
3049 && ! side_effects_p (op0
)
3050 && GET_MODE_CLASS (mode
) != MODE_CC
)
3051 return CONST0_RTX (mode
);
3053 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3054 there are no nonzero bits of C outside of X's mode. */
3055 if ((GET_CODE (op0
) == SIGN_EXTEND
3056 || GET_CODE (op0
) == ZERO_EXTEND
)
3057 && CONST_INT_P (trueop1
)
3058 && HWI_COMPUTABLE_MODE_P (mode
)
3059 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
3060 & UINTVAL (trueop1
)) == 0)
3062 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3063 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
3064 gen_int_mode (INTVAL (trueop1
),
3066 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
3069 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3070 we might be able to further simplify the AND with X and potentially
3071 remove the truncation altogether. */
3072 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
3074 rtx x
= XEXP (op0
, 0);
3075 enum machine_mode xmode
= GET_MODE (x
);
3076 tem
= simplify_gen_binary (AND
, xmode
, x
,
3077 gen_int_mode (INTVAL (trueop1
), xmode
));
3078 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
3081 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3082 if (GET_CODE (op0
) == IOR
3083 && CONST_INT_P (trueop1
)
3084 && CONST_INT_P (XEXP (op0
, 1)))
3086 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
3087 return simplify_gen_binary (IOR
, mode
,
3088 simplify_gen_binary (AND
, mode
,
3089 XEXP (op0
, 0), op1
),
3090 gen_int_mode (tmp
, mode
));
3093 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3094 insn (and may simplify more). */
3095 if (GET_CODE (op0
) == XOR
3096 && rtx_equal_p (XEXP (op0
, 0), op1
)
3097 && ! side_effects_p (op1
))
3098 return simplify_gen_binary (AND
, mode
,
3099 simplify_gen_unary (NOT
, mode
,
3100 XEXP (op0
, 1), mode
),
3103 if (GET_CODE (op0
) == XOR
3104 && rtx_equal_p (XEXP (op0
, 1), op1
)
3105 && ! side_effects_p (op1
))
3106 return simplify_gen_binary (AND
, mode
,
3107 simplify_gen_unary (NOT
, mode
,
3108 XEXP (op0
, 0), mode
),
3111 /* Similarly for (~(A ^ B)) & A. */
3112 if (GET_CODE (op0
) == NOT
3113 && GET_CODE (XEXP (op0
, 0)) == XOR
3114 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3115 && ! side_effects_p (op1
))
3116 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3118 if (GET_CODE (op0
) == NOT
3119 && GET_CODE (XEXP (op0
, 0)) == XOR
3120 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3121 && ! side_effects_p (op1
))
3122 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3124 /* Convert (A | B) & A to A. */
3125 if (GET_CODE (op0
) == IOR
3126 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3127 || rtx_equal_p (XEXP (op0
, 1), op1
))
3128 && ! side_effects_p (XEXP (op0
, 0))
3129 && ! side_effects_p (XEXP (op0
, 1)))
3132 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3133 ((A & N) + B) & M -> (A + B) & M
3134 Similarly if (N & M) == 0,
3135 ((A | N) + B) & M -> (A + B) & M
3136 and for - instead of + and/or ^ instead of |.
3137 Also, if (N & M) == 0, then
3138 (A +- N) & M -> A & M. */
3139 if (CONST_INT_P (trueop1
)
3140 && HWI_COMPUTABLE_MODE_P (mode
)
3141 && ~UINTVAL (trueop1
)
3142 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3143 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3148 pmop
[0] = XEXP (op0
, 0);
3149 pmop
[1] = XEXP (op0
, 1);
3151 if (CONST_INT_P (pmop
[1])
3152 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3153 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3155 for (which
= 0; which
< 2; which
++)
3158 switch (GET_CODE (tem
))
3161 if (CONST_INT_P (XEXP (tem
, 1))
3162 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3163 == UINTVAL (trueop1
))
3164 pmop
[which
] = XEXP (tem
, 0);
3168 if (CONST_INT_P (XEXP (tem
, 1))
3169 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3170 pmop
[which
] = XEXP (tem
, 0);
3177 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3179 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3181 return simplify_gen_binary (code
, mode
, tem
, op1
);
3185 /* (and X (ior (not X) Y) -> (and X Y) */
3186 if (GET_CODE (op1
) == IOR
3187 && GET_CODE (XEXP (op1
, 0)) == NOT
3188 && op0
== XEXP (XEXP (op1
, 0), 0))
3189 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3191 /* (and (ior (not X) Y) X) -> (and X Y) */
3192 if (GET_CODE (op0
) == IOR
3193 && GET_CODE (XEXP (op0
, 0)) == NOT
3194 && op1
== XEXP (XEXP (op0
, 0), 0))
3195 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3197 tem
= simplify_byte_swapping_operation (code
, mode
, op0
, op1
);
3201 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3207 /* 0/x is 0 (or x&0 if x has side-effects). */
3208 if (trueop0
== CONST0_RTX (mode
))
3210 if (side_effects_p (op1
))
3211 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3215 if (trueop1
== CONST1_RTX (mode
))
3217 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3221 /* Convert divide by power of two into shift. */
3222 if (CONST_INT_P (trueop1
)
3223 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3224 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3228 /* Handle floating point and integers separately. */
3229 if (SCALAR_FLOAT_MODE_P (mode
))
3231 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3232 safe for modes with NaNs, since 0.0 / 0.0 will then be
3233 NaN rather than 0.0. Nor is it safe for modes with signed
3234 zeros, since dividing 0 by a negative number gives -0.0 */
3235 if (trueop0
== CONST0_RTX (mode
)
3236 && !HONOR_NANS (mode
)
3237 && !HONOR_SIGNED_ZEROS (mode
)
3238 && ! side_effects_p (op1
))
3241 if (trueop1
== CONST1_RTX (mode
)
3242 && !HONOR_SNANS (mode
))
3245 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3246 && trueop1
!= CONST0_RTX (mode
))
3249 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3252 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3253 && !HONOR_SNANS (mode
))
3254 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3256 /* Change FP division by a constant into multiplication.
3257 Only do this with -freciprocal-math. */
3258 if (flag_reciprocal_math
3259 && !REAL_VALUES_EQUAL (d
, dconst0
))
3261 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3262 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3263 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3267 else if (SCALAR_INT_MODE_P (mode
))
3269 /* 0/x is 0 (or x&0 if x has side-effects). */
3270 if (trueop0
== CONST0_RTX (mode
)
3271 && !cfun
->can_throw_non_call_exceptions
)
3273 if (side_effects_p (op1
))
3274 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3278 if (trueop1
== CONST1_RTX (mode
))
3280 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3285 if (trueop1
== constm1_rtx
)
3287 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3289 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3295 /* 0%x is 0 (or x&0 if x has side-effects). */
3296 if (trueop0
== CONST0_RTX (mode
))
3298 if (side_effects_p (op1
))
3299 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3302 /* x%1 is 0 (of x&0 if x has side-effects). */
3303 if (trueop1
== CONST1_RTX (mode
))
3305 if (side_effects_p (op0
))
3306 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3307 return CONST0_RTX (mode
);
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
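
      /* Worked example: for unsigned x, x % 8 equals x & 7 (e.g. 29 % 8 = 5
	 and 29 & 7 = 5), so (umod x (const_int 8)) becomes
	 (and x (const_int 7)).  */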
3317 /* 0%x is 0 (or x&0 if x has side-effects). */
3318 if (trueop0
== CONST0_RTX (mode
))
3320 if (side_effects_p (op1
))
3321 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3324 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3325 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3327 if (side_effects_p (op0
))
3328 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3329 return CONST0_RTX (mode
);
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
		       GET_MODE_BITSIZE (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0, GEN_INT (GET_MODE_BITSIZE (mode)
							- INTVAL (trueop1)));
#endif
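
      /* Worked example: in SImode a rotate left by 24 lands every bit in the
	 same place as a rotate right by 32 - 24 = 8, so
	 (rotate x (const_int 24)) is rewritten as (rotatert x (const_int 8)),
	 keeping the constant in the low half of the range.  */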
3350 if (trueop1
== CONST0_RTX (mode
))
3352 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3354 /* Rotating ~0 always results in ~0. */
3355 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3356 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3357 && ! side_effects_p (op1
))
3360 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3362 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
3363 if (val
!= INTVAL (op1
))
3364 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3371 if (trueop1
== CONST0_RTX (mode
))
3373 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3375 goto canonicalize_shift
;
3378 if (trueop1
== CONST0_RTX (mode
))
3380 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3382 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3383 if (GET_CODE (op0
) == CLZ
3384 && CONST_INT_P (trueop1
)
3385 && STORE_FLAG_VALUE
== 1
3386 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3388 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3389 unsigned HOST_WIDE_INT zero_val
= 0;
3391 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3392 && zero_val
== GET_MODE_PRECISION (imode
)
3393 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3394 return simplify_gen_relational (EQ
, mode
, imode
,
3395 XEXP (op0
, 0), const0_rtx
);
3397 goto canonicalize_shift
;
3400 if (width
<= HOST_BITS_PER_WIDE_INT
3401 && mode_signbit_p (mode
, trueop1
)
3402 && ! side_effects_p (op0
))
3404 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3406 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3412 if (width
<= HOST_BITS_PER_WIDE_INT
3413 && CONST_INT_P (trueop1
)
3414 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3415 && ! side_effects_p (op0
))
3417 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3419 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3425 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3427 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3429 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3435 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3437 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3439 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3452 /* ??? There are simplifications that can be done. */
3456 if (!VECTOR_MODE_P (mode
))
3458 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3459 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3460 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3461 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3462 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3464 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3465 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3468 /* Extract a scalar element from a nested VEC_SELECT expression
3469 (with optional nested VEC_CONCAT expression). Some targets
3470 (i386) extract scalar element from a vector using chain of
3471 nested VEC_SELECT expressions. When input operand is a memory
3472 operand, this operation can be simplified to a simple scalar
3473 load from an offseted memory address. */
3474 if (GET_CODE (trueop0
) == VEC_SELECT
)
3476 rtx op0
= XEXP (trueop0
, 0);
3477 rtx op1
= XEXP (trueop0
, 1);
3479 enum machine_mode opmode
= GET_MODE (op0
);
3480 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3481 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3483 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3489 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3490 gcc_assert (i
< n_elts
);
3492 /* Select element, pointed by nested selector. */
3493 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3495 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3496 if (GET_CODE (op0
) == VEC_CONCAT
)
3498 rtx op00
= XEXP (op0
, 0);
3499 rtx op01
= XEXP (op0
, 1);
3501 enum machine_mode mode00
, mode01
;
3502 int n_elts00
, n_elts01
;
3504 mode00
= GET_MODE (op00
);
3505 mode01
= GET_MODE (op01
);
3507 /* Find out number of elements of each operand. */
3508 if (VECTOR_MODE_P (mode00
))
3510 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3511 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3516 if (VECTOR_MODE_P (mode01
))
3518 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3519 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3524 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3526 /* Select correct operand of VEC_CONCAT
3527 and adjust selector. */
3528 if (elem
< n_elts01
)
3539 vec
= rtvec_alloc (1);
3540 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3542 tmp
= gen_rtx_fmt_ee (code
, mode
,
3543 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3546 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3547 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3548 return XEXP (trueop0
, 0);
3552 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3553 gcc_assert (GET_MODE_INNER (mode
)
3554 == GET_MODE_INNER (GET_MODE (trueop0
)));
3555 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3557 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3559 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3560 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3561 rtvec v
= rtvec_alloc (n_elts
);
3564 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3565 for (i
= 0; i
< n_elts
; i
++)
3567 rtx x
= XVECEXP (trueop1
, 0, i
);
3569 gcc_assert (CONST_INT_P (x
));
3570 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3574 return gen_rtx_CONST_VECTOR (mode
, v
);
3577 /* Recognize the identity. */
3578 if (GET_MODE (trueop0
) == mode
)
3580 bool maybe_ident
= true;
3581 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3583 rtx j
= XVECEXP (trueop1
, 0, i
);
3584 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3586 maybe_ident
= false;
3594 /* If we build {a,b} then permute it, build the result directly. */
3595 if (XVECLEN (trueop1
, 0) == 2
3596 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3597 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3598 && GET_CODE (trueop0
) == VEC_CONCAT
3599 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3600 && GET_MODE (XEXP (trueop0
, 0)) == mode
3601 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3602 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3604 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3605 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3608 gcc_assert (i0
< 4 && i1
< 4);
3609 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3610 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3612 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3615 if (XVECLEN (trueop1
, 0) == 2
3616 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3617 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3618 && GET_CODE (trueop0
) == VEC_CONCAT
3619 && GET_MODE (trueop0
) == mode
)
3621 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3622 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3625 gcc_assert (i0
< 2 && i1
< 2);
3626 subop0
= XEXP (trueop0
, i0
);
3627 subop1
= XEXP (trueop0
, i1
);
3629 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3633 if (XVECLEN (trueop1
, 0) == 1
3634 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3635 && GET_CODE (trueop0
) == VEC_CONCAT
)
3638 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3640 /* Try to find the element in the VEC_CONCAT. */
3641 while (GET_MODE (vec
) != mode
3642 && GET_CODE (vec
) == VEC_CONCAT
)
3644 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3645 if (offset
< vec_size
)
3646 vec
= XEXP (vec
, 0);
3650 vec
= XEXP (vec
, 1);
3652 vec
= avoid_constant_pool_reference (vec
);
3655 if (GET_MODE (vec
) == mode
)
3659 /* If we select elements in a vec_merge that all come from the same
3660 operand, select from that operand directly. */
3661 if (GET_CODE (op0
) == VEC_MERGE
)
3663 rtx trueop02
= avoid_constant_pool_reference (XEXP (op0
, 2));
3664 if (CONST_INT_P (trueop02
))
3666 unsigned HOST_WIDE_INT sel
= UINTVAL (trueop02
);
3667 bool all_operand0
= true;
3668 bool all_operand1
= true;
3669 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3671 rtx j
= XVECEXP (trueop1
, 0, i
);
3672 if (sel
& (1 << UINTVAL (j
)))
3673 all_operand1
= false;
3675 all_operand0
= false;
3677 if (all_operand0
&& !side_effects_p (XEXP (op0
, 1)))
3678 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 0), op1
);
3679 if (all_operand1
&& !side_effects_p (XEXP (op0
, 0)))
3680 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (op0
, 1), op1
);
3687 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3688 ? GET_MODE (trueop0
)
3689 : GET_MODE_INNER (mode
));
3690 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3691 ? GET_MODE (trueop1
)
3692 : GET_MODE_INNER (mode
));
3694 gcc_assert (VECTOR_MODE_P (mode
));
3695 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3696 == GET_MODE_SIZE (mode
));
3698 if (VECTOR_MODE_P (op0_mode
))
3699 gcc_assert (GET_MODE_INNER (mode
)
3700 == GET_MODE_INNER (op0_mode
));
3702 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3704 if (VECTOR_MODE_P (op1_mode
))
3705 gcc_assert (GET_MODE_INNER (mode
)
3706 == GET_MODE_INNER (op1_mode
));
3708 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3710 if ((GET_CODE (trueop0
) == CONST_VECTOR
3711 || CONST_SCALAR_INT_P (trueop0
)
3712 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3713 && (GET_CODE (trueop1
) == CONST_VECTOR
3714 || CONST_SCALAR_INT_P (trueop1
)
3715 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3717 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3718 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3719 rtvec v
= rtvec_alloc (n_elts
);
3721 unsigned in_n_elts
= 1;
3723 if (VECTOR_MODE_P (op0_mode
))
3724 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3725 for (i
= 0; i
< n_elts
; i
++)
3729 if (!VECTOR_MODE_P (op0_mode
))
3730 RTVEC_ELT (v
, i
) = trueop0
;
3732 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3736 if (!VECTOR_MODE_P (op1_mode
))
3737 RTVEC_ELT (v
, i
) = trueop1
;
3739 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3744 return gen_rtx_CONST_VECTOR (mode
, v
);
3747 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3748 Restrict the transformation to avoid generating a VEC_SELECT with a
3749 mode unrelated to its operand. */
3750 if (GET_CODE (trueop0
) == VEC_SELECT
3751 && GET_CODE (trueop1
) == VEC_SELECT
3752 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0))
3753 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3755 rtx par0
= XEXP (trueop0
, 1);
3756 rtx par1
= XEXP (trueop1
, 1);
3757 int len0
= XVECLEN (par0
, 0);
3758 int len1
= XVECLEN (par1
, 0);
3759 rtvec vec
= rtvec_alloc (len0
+ len1
);
3760 for (int i
= 0; i
< len0
; i
++)
3761 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3762 for (int i
= 0; i
< len1
; i
++)
3763 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3764 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3765 gen_rtx_PARALLEL (VOIDmode
, vec
));
3778 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3781 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3783 unsigned int width
= GET_MODE_PRECISION (mode
);
3785 if (VECTOR_MODE_P (mode
)
3786 && code
!= VEC_CONCAT
3787 && GET_CODE (op0
) == CONST_VECTOR
3788 && GET_CODE (op1
) == CONST_VECTOR
)
3790 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3791 enum machine_mode op0mode
= GET_MODE (op0
);
3792 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3793 enum machine_mode op1mode
= GET_MODE (op1
);
3794 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3795 rtvec v
= rtvec_alloc (n_elts
);
3798 gcc_assert (op0_n_elts
== n_elts
);
3799 gcc_assert (op1_n_elts
== n_elts
);
3800 for (i
= 0; i
< n_elts
; i
++)
3802 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3803 CONST_VECTOR_ELT (op0
, i
),
3804 CONST_VECTOR_ELT (op1
, i
));
3807 RTVEC_ELT (v
, i
) = x
;
3810 return gen_rtx_CONST_VECTOR (mode
, v
);
3813 if (VECTOR_MODE_P (mode
)
3814 && code
== VEC_CONCAT
3815 && (CONST_SCALAR_INT_P (op0
)
3816 || GET_CODE (op0
) == CONST_FIXED
3817 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3818 && (CONST_SCALAR_INT_P (op1
)
3819 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3820 || GET_CODE (op1
) == CONST_FIXED
))
3822 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3823 rtvec v
= rtvec_alloc (n_elts
);
3825 gcc_assert (n_elts
>= 2);
3828 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3829 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3831 RTVEC_ELT (v
, 0) = op0
;
3832 RTVEC_ELT (v
, 1) = op1
;
3836 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3837 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3840 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3841 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3842 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3844 for (i
= 0; i
< op0_n_elts
; ++i
)
3845 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3846 for (i
= 0; i
< op1_n_elts
; ++i
)
3847 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3850 return gen_rtx_CONST_VECTOR (mode
, v
);
3853 if (SCALAR_FLOAT_MODE_P (mode
)
3854 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3855 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3856 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3867 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3869 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3871 for (i
= 0; i
< 4; i
++)
3888 real_from_target (&r
, tmp0
, mode
);
3889 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3893 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3896 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3897 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3898 real_convert (&f0
, mode
, &f0
);
3899 real_convert (&f1
, mode
, &f1
);
3901 if (HONOR_SNANS (mode
)
3902 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3906 && REAL_VALUES_EQUAL (f1
, dconst0
)
3907 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3910 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3911 && flag_trapping_math
3912 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3914 int s0
= REAL_VALUE_NEGATIVE (f0
);
3915 int s1
= REAL_VALUE_NEGATIVE (f1
);
3920 /* Inf + -Inf = NaN plus exception. */
3925 /* Inf - Inf = NaN plus exception. */
3930 /* Inf / Inf = NaN plus exception. */
3937 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3938 && flag_trapping_math
3939 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3940 || (REAL_VALUE_ISINF (f1
)
3941 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3942 /* Inf * 0 = NaN plus exception. */
3945 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3947 real_convert (&result
, mode
, &value
);
3949 /* Don't constant fold this floating point operation if
3950 the result has overflowed and flag_trapping_math. */
3952 if (flag_trapping_math
3953 && MODE_HAS_INFINITIES (mode
)
3954 && REAL_VALUE_ISINF (result
)
3955 && !REAL_VALUE_ISINF (f0
)
3956 && !REAL_VALUE_ISINF (f1
))
3957 /* Overflow plus exception. */
3960 /* Don't constant fold this floating point operation if the
3961 result may dependent upon the run-time rounding mode and
3962 flag_rounding_math is set, or if GCC's software emulation
3963 is unable to accurately represent the result. */
3965 if ((flag_rounding_math
3966 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3967 && (inexact
|| !real_identical (&result
, &value
)))
3970 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3974 /* We can fold some multi-word operations. */
3975 if (GET_MODE_CLASS (mode
) == MODE_INT
3976 && width
== HOST_BITS_PER_DOUBLE_INT
3977 && (CONST_DOUBLE_AS_INT_P (op0
) || CONST_INT_P (op0
))
3978 && (CONST_DOUBLE_AS_INT_P (op1
) || CONST_INT_P (op1
)))
3980 double_int o0
, o1
, res
, tmp
;
3983 o0
= rtx_to_double_int (op0
);
3984 o1
= rtx_to_double_int (op1
);
3989 /* A - B == A + (-B). */
3992 /* Fall through.... */
4003 res
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
4010 tmp
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
4017 res
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
4024 tmp
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
4058 case LSHIFTRT
: case ASHIFTRT
:
4060 case ROTATE
: case ROTATERT
:
4062 unsigned HOST_WIDE_INT cnt
;
4064 if (SHIFT_COUNT_TRUNCATED
)
4067 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
4070 if (!o1
.fits_uhwi ()
4071 || o1
.to_uhwi () >= GET_MODE_PRECISION (mode
))
4074 cnt
= o1
.to_uhwi ();
4075 unsigned short prec
= GET_MODE_PRECISION (mode
);
4077 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
4078 res
= o0
.rshift (cnt
, prec
, code
== ASHIFTRT
);
4079 else if (code
== ASHIFT
)
4080 res
= o0
.alshift (cnt
, prec
);
4081 else if (code
== ROTATE
)
4082 res
= o0
.lrotate (cnt
, prec
);
4083 else /* code == ROTATERT */
4084 res
= o0
.rrotate (cnt
, prec
);
4092 return immed_double_int_const (res
, mode
);
4095 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
4096 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
4098 /* Get the integer argument values in two forms:
4099 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
4101 arg0
= INTVAL (op0
);
4102 arg1
= INTVAL (op1
);
4104 if (width
< HOST_BITS_PER_WIDE_INT
)
4106 arg0
&= GET_MODE_MASK (mode
);
4107 arg1
&= GET_MODE_MASK (mode
);
4110 if (val_signbit_known_set_p (mode
, arg0s
))
4111 arg0s
|= ~GET_MODE_MASK (mode
);
4114 if (val_signbit_known_set_p (mode
, arg1s
))
4115 arg1s
|= ~GET_MODE_MASK (mode
);
4123 /* Compute the value of the arithmetic. */
4128 val
= (unsigned HOST_WIDE_INT
) arg0s
+ arg1s
;
4132 val
= (unsigned HOST_WIDE_INT
) arg0s
- arg1s
;
4136 val
= (unsigned HOST_WIDE_INT
) arg0s
* arg1s
;
4141 || ((unsigned HOST_WIDE_INT
) arg0s
4142 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4145 val
= arg0s
/ arg1s
;
4150 || ((unsigned HOST_WIDE_INT
) arg0s
4151 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4154 val
= arg0s
% arg1s
;
4159 || ((unsigned HOST_WIDE_INT
) arg0s
4160 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4163 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
4168 || ((unsigned HOST_WIDE_INT
) arg0s
4169 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4172 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
4190 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4191 the value is in range. We can't return any old value for
4192 out-of-range arguments because either the middle-end (via
4193 shift_truncation_mask) or the back-end might be relying on
4194 target-specific knowledge. Nor can we rely on
4195 shift_truncation_mask, since the shift might not be part of an
4196 ashlM3, lshrM3 or ashrM3 instruction. */
4197 if (SHIFT_COUNT_TRUNCATED
)
4198 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
4199 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
4202 val
= (code
== ASHIFT
4203 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
4204 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
4206 /* Sign-extend the result for arithmetic right shifts. */
4207 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
4208 val
|= HOST_WIDE_INT_M1U
<< (width
- arg1
);
	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_MULT:
	case US_MULT:
	case SS_DIV:
	case US_DIV:
	case SS_ASHIFT:
	case US_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
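/* A small example of the brute-force flattening performed below (an
   illustrative sketch for an integer mode; the registers are made up):

     (plus (minus (reg A) (reg B)) (reg B))

   expands into the operand list A, B and -B; the matching B and -B combine
   to zero in the pairwise loop, the zero is then folded away, and the
   rebuilt result is simply (reg A).  */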
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;
      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);
  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */

  /* Insertion sort is good enough for an eight-element array.  */
  for (i = 1; i < n_ops; i++)
    {
      struct simplify_plus_minus_op_data save;
      j = i - 1;
      if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	continue;

      canonicalized = 1;
      save = ops[i];
      do
	ops[j + 1] = ops[j];
      while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
      ops[j + 1] = save;
    }
  for (i = n_ops - 1; i > 0; i--)
    for (j = i - 1; j >= 0; j--)
      {
	rtx lhs = ops[j].op, rhs = ops[i].op;
	int lneg = ops[j].neg, rneg = ops[i].neg;

	if (lhs != 0 && rhs != 0)
	  {
	    enum rtx_code ncode = PLUS;

	    if (lneg != rneg)
	      {
		ncode = MINUS;
		if (lneg)
		  tem = lhs, lhs = rhs, rhs = tem;
	      }
	    else if (swap_commutative_operands_p (lhs, rhs))
	      tem = lhs, lhs = rhs, rhs = tem;

	    if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		&& (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
	      {
		rtx tem_lhs, tem_rhs;

		tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

		if (tem && !CONSTANT_P (tem))
		  tem = gen_rtx_CONST (GET_MODE (tem), tem);
	      }
	    else
	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);

	    /* Reject "simplifications" that just wrap the two
	       arguments in a CONST.  Failure to do so can result
	       in infinite recursion with simplify_binary_operation
	       when it calls us to simplify CONST operations.  */
	    if (tem
		&& ! (GET_CODE (tem) == CONST
		      && GET_CODE (XEXP (tem, 0)) == ncode
		      && XEXP (XEXP (tem, 0), 0) == lhs
		      && XEXP (XEXP (tem, 0), 1) == rhs))
	      {
		lneg &= rneg;
		if (GET_CODE (tem) == NEG)
		  tem = XEXP (tem, 0), lneg = !lneg;
		if (CONST_INT_P (tem) && lneg)
		  tem = neg_const_int (mode, tem), lneg = 0;

		ops[i].op = tem;
		ops[i].neg = lneg;
		ops[j].op = NULL_RTX;
		changed = 1;
		canonicalized = 1;
	      }
	  }
      }
  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      {
	ops[i] = ops[j];
	i++;
      }
  n_ops = i;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }
  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
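/* Example of how comparison_result is used (illustrative only): when the
   constant folder below compares 5 with 3 it passes CMP_GT | CMP_GTU as
   KNOWN_RESULTS.  For code == GE this returns const_true_rtx (CMP_LT is not
   set), while for code == LTU it returns const0_rtx (CMP_LTU is not set).  */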
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;
  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (CONST_DOUBLE_AS_INT_P (trueop1))
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= GET_MODE_MASK (mode);
	  l1u &= GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l0s))
	    l0s |= ~GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l1s))
	    l1s |= ~GET_MODE_MASK (mode);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
	return comparison_result (code, CMP_EQ);

      {
	int cr;
	cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	return comparison_result (code, cr);
      }
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
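/* Example for the bounds test above (an illustrative sketch): for an SImode
   operand whose nonzero_bits are at most 0xff (say a zero-extended QImode
   register), the reduced range is [0, 0xff].  Comparing it against
   (const_int 300) therefore folds (eq ... 300) to const0_rtx,
   (ne ... 300) to const_true_rtx and (ltu ... 300) to const_true_rtx,
   since 300 lies above mmax.  */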
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GE:
		case GT:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
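/* For instance (an illustrative sketch), the negation handling above turns

     (fma:SF (neg:SF (reg a)) (neg:SF (reg b)) (reg c))

   into (fma:SF (reg a) (reg b) (reg c)), possibly with the two
   multiplication operands swapped by the canonicalization step: stripping
   one NEG succeeds because simplify_unary_operation folds the matching NEG
   of the other operand, and any_change causes a fresh FMA to be built.  */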
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
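/* A worked example of the extraction above (assuming !BITS_BIG_ENDIAN):
   extracting a 3-bit field at bit position 2 from (const_int 0xdc) shifts
   val right by op2val = 2 (0xdc >> 2 = 0x37) and masks it to 3 bits,
   giving 7.  ZERO_EXTRACT returns (const_int 7); SIGN_EXTRACT sees the
   field's top bit set, propagates it, and returns (const_int -1).  */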
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
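/* An illustrative VEC_MERGE folding handled above (hedged sketch): with
   sel = 5 (binary 0101) selecting elements 0 and 2 from the first operand,

     (vec_merge:V4SI (const_vector [1 2 3 4])
		     (const_vector [5 6 7 8])
		     (const_int 5))

   folds to (const_vector [1 6 3 8]).  */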
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
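/* For example (illustrative, assuming a little-endian 64-bit target):
   simplify_immed_subreg (SImode, op, DImode, 0) on a DImode constant with
   value 0x1122334455667788 unpacks the eight bytes 88 77 66 55 44 33 22 11,
   selects the four bytes starting at BYTE 0 and repacks them as
   (const_int 0x55667788); BYTE 4 would instead give (const_int 0x11223344).  */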
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;
	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;
	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE (OP:INNERMODE, BYTE).
   Return 0 if no simplifications are possible.  */
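/* For instance (an illustrative sketch), a SUBREG of a SUBREG is folded by
   the code below into a single SUBREG with a combined offset:

     (subreg:SI (subreg:DI (reg:TI r) 8) 4)

   becomes (subreg:SI (reg:TI r) 12), provided the resulting subreg is valid
   for the target.  */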
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of op.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source provides.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
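/* A minimal usage sketch (hypothetical caller, not part of this file):

     rtx x = gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 1), const0_rtx);
     rtx y = simplify_rtx (x);

   Here Y ends up being (reg:SI 1), since adding zero folds away; a NULL
   return simply means that no simplification was found, not an error.  */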
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))