/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
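/* For example, the double-width value -4 is represented by the pair
   (low = -4, high = HWI_SIGN_EXTEND (-4) = -1), while 4 becomes
   (low = 4, high = 0).  */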
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && CONST_DOUBLE_AS_INT_P (x)
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
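/* For example, in SImode the sign bit constant is 1 << 31, so only a
   constant whose low 32 bits equal 0x80000000 satisfies this
   predicate; 0x40000000 fails the final equality test.  */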
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
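/* A typical use: simplify_gen_binary (PLUS, SImode, const1_rtx, x)
   first tries constant folding and otherwise returns
   (plus:SI x (const_int 1)), with the constant canonically second.  */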
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
	  && offset >= 0 && offset < GET_MODE_SIZE (cmode))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
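/* For example, a MEM whose address is a CONSTANT_POOL_ADDRESS_P
   SYMBOL_REF for the DFmode constant 1.0 is replaced by the
   CONST_DOUBLE itself, so later folding sees a plain constant.  */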
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !tree_fits_shwi_p (toffset)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += tree_to_shwi (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += MEM_OFFSET (x);

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
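/* For example, if X is a MEM whose MEM_EXPR names a static variable
   and whose MEM_OFFSET is known, the address is rewritten in terms of
   the variable's DECL_RTL plus that offset, undoing -fpic address
   legitimization.  */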
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
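/* Like the generators above, this folds when it can: comparing a
   constant with itself, for instance, collapses to the mode's
   representation of true rather than building an EQ rtx.  */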
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
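/* For example, replacing REG by (const_int 1) in
   (plus:SI (reg:SI R) (const_int 0)) does not just substitute: the
   resulting (plus:SI (const_int 1) (const_int 0)) is folded, so the
   caller gets (const_int 1) back.  */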
/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI (reg:DI X) (const_int Y))

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */
static rtx
simplify_truncation (enum machine_mode mode, rtx op,
		     enum machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
	 origmode, we can omit both the extension and the subreg.
	 If MODE is not larger than the origmode, we can apply the
	 truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, we can just extend to the appropriate
	 mode.  */
      enum machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
	return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
	return simplify_gen_unary (TRUNCATE, mode,
				   XEXP (op, 0), origmode);
      else
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0), origmode);
    }

  /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
     to (op:SI (truncate:SI (x:DI)) (truncate:SI (y:DI))).  */
  if (GET_CODE (op) == PLUS
      || GET_CODE (op) == MINUS
      || GET_CODE (op) == MULT)
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
	{
	  rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
	  if (op1)
	    return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
	}
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
				     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
	  || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
				(WORDS_BIG_ENDIAN
				 ? byte - shifted_bytes
				 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	  || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
			       XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
	  <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
	/* If subreg above is paradoxical and C is narrower
	   than A, return (subreg:A (truncate:C X) 0).  */
	return simplify_gen_subreg (mode, SUBREG_REG (op),
				    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
	  if (temp)
	    return temp;
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
	{
	  rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
	  return simplify_gen_unary (BSWAP, mode, x, mode);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
				    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_SCALAR_INT_P (XEXP (op, 1))
	      || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult A (neg B)).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
	  return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_PRECISION (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_PRECISION (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
	 with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == MULT)
	break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	{
	  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
	    {
	      temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	      if (temp)
		return temp;
	    }
	  /* We can't handle truncation to a partial integer mode here
	     because we don't know the real bitsize of the partial
	     integer mode.  */
	  break;
	}

      if (GET_MODE (op) != VOIDmode)
	{
	  temp = simplify_truncation (mode, op, GET_MODE (op));
	  if (temp)
	    return temp;
	}

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
	  && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
	      || truncated_to_mode (mode, op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* A truncate of a memory is just loading the low part of the memory
	 if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
	  && !VECTOR_MODE_P (mode)
	  && !MEM_VOLATILE_P (op)
	  && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
	 */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || val_signbit_known_clear_p (GET_MODE (op),
					nonzero_bits (op, GET_MODE (op))))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					   ? SIGN_EXTEND : ZERO_EXTEND,
					   mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	{
	  temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
	  if (temp)
	    return temp;
	}

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      if (inner)
		return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
	 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
	 of mode N.  E.g.
	 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
	 (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
	  && GET_MODE_PRECISION (GET_MODE (op))
	     < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	     <= HOST_BITS_PER_WIDE_INT
	  && GET_MODE_PRECISION (mode)
	     >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
	  && subreg_lowpart_p (op)
	  && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
	      & ~GET_MODE_MASK (GET_MODE (op))) == 0)
	{
	  if (GET_MODE_PRECISION (mode)
	      == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
	    return SUBREG_REG (op);
	  return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
				     GET_MODE (SUBREG_REG (op)));
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
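/* A worked instance of the ZERO_EXTEND subreg rule above:
   (zero_extend:SI (subreg:QI (and:SI (reg:SI R) (const_int 63)) 0))
   has no nonzero bits outside QImode, so the subreg and the extension
   both disappear, leaving (and:SI (reg:SI R) (const_int 63)).  */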
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
	  || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
	/* We should never get a negative number.  */
	gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
	  break;

	case CLRSB:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    val = GET_MODE_PRECISION (mode) - 1;
	  else if (arg0 >= 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
	  else if (arg0 < 0)
	    val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_PRECISION (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & GET_MODE_MASK (op_mode);
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  op_width = GET_MODE_PRECISION (op_mode);
	  if (op_width == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == op_width);
	      val = arg0;
	    }
	  else if (op_width < HOST_BITS_PER_WIDE_INT)
	    {
	      val = arg0 & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, val))
		val |= ~GET_MODE_MASK (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	case US_NEG:
	case SS_ABS:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (width <= HOST_BITS_PER_DOUBLE_INT
	   && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
    {
      double_int first, value;

      if (CONST_DOUBLE_AS_INT_P (op))
	first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
				       CONST_DOUBLE_LOW (op));
      else
	first = double_int::from_shwi (INTVAL (op));

      switch (code)
	{
	case NOT:
	  value = ~first;
	  break;

	case NEG:
	  value = -first;
	  break;

	case ABS:
	  if (first.is_negative ())
	    value = -first;
	  else
	    value = first;
	  break;

	case FFS:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ffs_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
	  else
	    value.low = 0;
	  break;

	case CLZ:
	  value.high = 0;
	  if (first.high != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
		      - HOST_BITS_PER_WIDE_INT;
	  else if (first.low != 0)
	    value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case CTZ:
	  value.high = 0;
	  if (first.low != 0)
	    value.low = ctz_hwi (first.low);
	  else if (first.high != 0)
	    value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
	    value.low = GET_MODE_PRECISION (mode);
	  break;

	case POPCOUNT:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  break;

	case PARITY:
	  value = double_int_zero;
	  while (first.low)
	    {
	      value.low++;
	      first.low &= first.low - 1;
	    }
	  while (first.high)
	    {
	      value.low++;
	      first.high &= first.high - 1;
	    }
	  value.low &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    value = double_int_zero;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (first.low >> s) & 0xff;
		else
		  byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  value.low |= byte << d;
		else
		  value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  value = first;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || op_width > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      value.low = first.low & GET_MODE_MASK (op_mode);
	      if (val_signbit_known_set_p (op_mode, value.low))
		value.low |= ~GET_MODE_MASK (op_mode);

	      value.high = HWI_SIGN_EXTEND (value.low);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_int_const (value, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  return 0;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (CONST_DOUBLE_AS_FLOAT_P (op)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = HOST_WIDE_INT_M1U << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == HOST_BITS_PER_DOUBLE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, enum machine_mode mode,
				  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
				 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
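/* For example, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))): the constant is byte-swapped
   once so the single bswap can move outward.  */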
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
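/* For instance, (plus (plus x (const_int 1)) (const_int 2)) hits the
   "(a op b) op c" -> "a op (b op c)" step above: the two constants meet,
   fold to 3, and the result is (plus x (const_int 3)).  */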
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);
  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
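      /* E.g. in QImode with a == 5: (~5) + 1 == 250 + 1 == 251 == -5
         (mod 256), the usual two's-complement negation identity.  */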
      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = coeff0 + coeff1;
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : NULL_RTX;
            }
        }
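      /* E.g. (plus (mult x (const_int 4)) x) has coeff0 == 4 and
         coeff1 == 1, so it becomes (mult x (const_int 5)) when the new
         multiply is no more expensive than the original PLUS.  */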
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
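      /* Adding the sign bit cannot carry into a higher bit, so in QImode
         (plus (xor x 0x21) 0x80) is the same as (xor x 0xa1).  */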
      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }
      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and
         STORE_FLAG_VALUE is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);
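      /* All-ones minus a flips every bit with no borrows: in QImode,
         0xff - 0x36 == 0xc9 == ~0x36.  */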
      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
              negcoeff1 = -negcoeff1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = coeff0 + negcoeff1;
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : NULL_RTX;
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
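      /* The subtraction never borrows, because (x & y) only has bits
         that x also has: e.g. 0b1100 - (0b1100 & 0b1010) == 0b0100
         == 0b1100 & ~0b1010.  */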
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
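      /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)),
         since exact_log2 (8) == 3.  */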
      /* Likewise for multipliers wider than a word.  */
      if (CONST_DOUBLE_AS_INT_P (trueop1)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
          && (val < HOST_BITS_PER_DOUBLE_INT - 1
              || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode)
          && trueop1 == CONSTM1_RTX (mode)
          && !side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;
      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
          && !side_effects_p (op0))
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
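      /* E.g. with c1 == 0x3c and c2 == 0x0f, (ior (and x 0x3c) 0x0f)
         becomes (ior (and x 0x30) 0x0f): the bits of C1 that C2 already
         covers are dropped.  */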
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
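      /* E.g. in SImode, (ior (ashift a 24) (lshiftrt a 8)) satisfies
         24 + 8 == 32 == GET_MODE_PRECISION and becomes (rotate a 24).  */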
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        {
          rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (UINTVAL (XEXP (op0, 1))
                                                       & ~UINTVAL (op1),
                                                       mode));
          return simplify_gen_binary (IOR, mode, tmp, op1);
        }
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && trunc_int_for_mode (mask, mode) == mask
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == PLUS
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
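      /* When no bit can be set in both operands, each result bit comes
         from exactly one side, so XOR and IOR agree:
         0x0f ^ 0xf0 == 0x0f | 0xf0 == 0xff.  */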
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode,
                                                          op0, op1),
                                     mode);
      }
      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          rtx na_c
            = simplify_binary_operation (AND, mode,
                                         simplify_gen_unary (NOT, mode,
                                                             a, mode),
                                         c);
          if ((~cval & bval) == 0)
            {
              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            gen_int_mode (~bval & cval,
                                                          mode));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (na_c == const0_rtx)
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    gen_int_mode (~cval & bval,
                                                                  mode));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              gen_int_mode (~bval & cval,
                                                            mode));
                }
            }
        }
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          enum machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }
      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0),
                                                           op1),
                                      gen_int_mode (tmp, mode));
        }
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
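      /* Bitwise check for the pair above: with A == 1 the result bit is
         ~(1 ^ B) == B, and with A == 0 it is 0, so (~(A ^ B)) & A is
         exactly A & B.  */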
      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                      == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
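      /* E.g. with M == 0xff: ((a | 0x100) + b) & 0xff becomes
         (a + b) & 0xff, because 0x100 & 0xff == 0 and carries only
         propagate upward, so bits above the mask cannot influence the
         masked sum.  */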
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
        return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        {
          tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          if (tem)
            return tem;
        }
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
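      /* Unsigned division by 2**N is a plain logical shift:
         (udiv x 16) becomes (lshiftrt x 4).  */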
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);
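              /* The next transformation turns, e.g., x / 4.0 into
                 x * 0.25; the reciprocal of a power of two is exact,
                 so no accuracy is lost in that case.  */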
              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            {
              tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (tem)
                return tem;
            }
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              if (x)
                return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    gen_int_mode (INTVAL (op1) - 1, mode));
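      /* E.g. (umod x 8) becomes (and x 7): for unsigned values the
         remainder modulo 2**N is just the low N bits.  */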
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
         prefer left rotation, if op1 is from bitsize / 2 + 1 to
         bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
         amount instead.  */
      if (CONST_INT_P (trueop1)
          && IN_RANGE (INTVAL (trueop1),
                       GET_MODE_BITSIZE (mode) / 2 + (code == ROTATE),
                       GET_MODE_BITSIZE (mode) - 1))
        return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
                                    mode, op0,
                                    GEN_INT (GET_MODE_BITSIZE (mode)
                                             - INTVAL (trueop1)));
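      /* E.g. in SImode, (rotate x 30) becomes (rotatert x 2), keeping
         the constant rotate count in the low half of the bitsize.  */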
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
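          /* With a 32-bit X and CLZ defined to return 32 at zero,
             (lshiftrt (clz x) 5) is 1 iff clz (x) == 32, i.e. iff
             x == 0, hence the (eq x 0) form.  */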
        }
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract scalar element from a vector using chain of
             nested VEC_SELECT expressions.  When input operand is a memory
             operand, this operation can be simplified to a simple scalar
             load from an offset memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }

          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
)));
3540 gcc_assert (GET_MODE_INNER (mode
)
3541 == GET_MODE_INNER (GET_MODE (trueop0
)));
3542 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3544 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3546 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3547 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3548 rtvec v
= rtvec_alloc (n_elts
);
3551 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3552 for (i
= 0; i
< n_elts
; i
++)
3554 rtx x
= XVECEXP (trueop1
, 0, i
);
3556 gcc_assert (CONST_INT_P (x
));
3557 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3561 return gen_rtx_CONST_VECTOR (mode
, v
);
3564 /* Recognize the identity. */
3565 if (GET_MODE (trueop0
) == mode
)
3567 bool maybe_ident
= true;
3568 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3570 rtx j
= XVECEXP (trueop1
, 0, i
);
3571 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3573 maybe_ident
= false;
          /* If we build {a,b} then permute it, build the result directly.  */
          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 0)) == mode
              && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
              && GET_MODE (XEXP (trueop0, 1)) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 4 && i1 < 4);
              subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
              subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }

          if (XVECLEN (trueop1, 0) == 2
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && CONST_INT_P (XVECEXP (trueop1, 0, 1))
              && GET_CODE (trueop0) == VEC_CONCAT
              && GET_MODE (trueop0) == mode)
            {
              unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
              unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
              rtx subop0, subop1;

              gcc_assert (i0 < 2 && i1 < 2);
              subop0 = XEXP (trueop0, i0);
              subop1 = XEXP (trueop0, i1);

              return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
            }
          /* If we select one element from a vec_concat, return it.  */
          if (XVECLEN (trueop1, 0) == 1
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && GET_CODE (trueop0) == VEC_CONCAT)
            {
              rtx vec = trueop0;
              int offset
                = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

              /* Try to find the element in the VEC_CONCAT.  */
              while (GET_MODE (vec) != mode
                     && GET_CODE (vec) == VEC_CONCAT)
                {
                  HOST_WIDE_INT vec_size
                    = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
                  if (offset < vec_size)
                    vec = XEXP (vec, 0);
                  else
                    {
                      offset -= vec_size;
                      vec = XEXP (vec, 1);
                    }
                  vec = avoid_constant_pool_reference (vec);
                }

              if (GET_MODE (vec) == mode)
                return vec;
            }
          /* If we select elements in a vec_merge that all come from the same
             operand, select from that operand directly.  */
          if (GET_CODE (op0) == VEC_MERGE)
            {
              rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
              if (CONST_INT_P (trueop02))
                {
                  unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
                  bool all_operand0 = true;
                  bool all_operand1 = true;
                  for (int i = 0; i < XVECLEN (trueop1, 0); i++)
                    {
                      rtx j = XVECEXP (trueop1, 0, i);
                      if (sel & (1 << UINTVAL (j)))
                        all_operand1 = false;
                      else
                        all_operand0 = false;
                    }
                  if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
                    return simplify_gen_binary (VEC_SELECT, mode,
                                                XEXP (op0, 0), op1);
                  if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
                    return simplify_gen_binary (VEC_SELECT, mode,
                                                XEXP (op0, 1), op1);
                }
            }
        }
      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);
        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_SCALAR_INT_P (trueop0)
             || CONST_DOUBLE_AS_FLOAT_P (trueop0))
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_SCALAR_INT_P (trueop1)
                || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
        /* Try to merge two VEC_SELECTs from the same vector into a single one.
           Restrict the transformation to avoid generating a VEC_SELECT with a
           mode unrelated to its operand.  */
        if (GET_CODE (trueop0) == VEC_SELECT
            && GET_CODE (trueop1) == VEC_SELECT
            && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
            && GET_MODE (XEXP (trueop0, 0)) == mode)
          {
            rtx par0 = XEXP (trueop0, 1);
            rtx par1 = XEXP (trueop1, 1);
            int len0 = XVECLEN (par0, 0);
            int len1 = XVECLEN (par1, 0);
            rtvec vec = rtvec_alloc (len0 + len1);
            for (int i = 0; i < len0; i++)
              RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
            for (int i = 0; i < len1; i++)
              RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
            return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
                                        gen_rtx_PARALLEL (VOIDmode, vec));
          }

        return 0;
      }

    default:
      break;
    }

  return 0;
}

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
          || GET_CODE (op0) == CONST_FIXED
          || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
          || CONST_DOUBLE_AS_FLOAT_P (op1)
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return 0;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;
      bool overflow;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          o1 = -o1;

          /* Fall through....  */

        case PLUS:
          res = o0 + o1;
          break;

        case MULT:
          res = o0 * o1;
          break;

        case DIV:
          res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
                                         &tmp, &overflow);
          if (overflow)
            return 0;
          break;

        case MOD:
          tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
                                         &res, &overflow);
          if (overflow)
            return 0;
          break;

        case UDIV:
          res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
                                         &tmp, &overflow);
          if (overflow)
            return 0;
          break;

        case UMOD:
          tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
                                         &res, &overflow);
          if (overflow)
            return 0;
          break;

        case AND:
          res = o0 & o1;
          break;

        case IOR:
          res = o0 | o1;
          break;

        case XOR:
          res = o0 ^ o1;
          break;

        case SMIN:
          res = o0.smin (o1);
          break;

        case SMAX:
          res = o0.smax (o1);
          break;

        case UMIN:
          res = o0.umin (o1);
          break;

        case UMAX:
          res = o0.umax (o1);
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          {
            unsigned HOST_WIDE_INT cnt;

            if (SHIFT_COUNT_TRUNCATED)
              {
                o1.high = 0;
                o1.low &= GET_MODE_PRECISION (mode) - 1;
              }

            if (!o1.fits_uhwi ()
                || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
              return 0;

            cnt = o1.to_uhwi ();
            unsigned short prec = GET_MODE_PRECISION (mode);

            if (code == LSHIFTRT || code == ASHIFTRT)
              res = o0.rshift (cnt, prec, code == ASHIFTRT);
            else if (code == ASHIFT)
              res = o0.alshift (cnt, prec);
            else if (code == ROTATE)
              res = o0.lrotate (cnt, prec);
            else /* code == ROTATERT */
              res = o0.rrotate (cnt, prec);

            break;
          }

        default:
          return 0;
        }

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= GET_MODE_MASK (mode);
          arg1 &= GET_MODE_MASK (mode);

          arg0s = arg0;
          if (val_signbit_known_set_p (mode, arg0s))
            arg0s |= ~GET_MODE_MASK (mode);

          arg1s = arg1;
          if (val_signbit_known_set_p (mode, arg1s))
            arg1s |= ~GET_MODE_MASK (mode);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }
      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;

        case DIV:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= HOST_WIDE_INT_M1U << (width - arg1);
          break;

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
        case SS_MULT:
        case US_MULT:
        case SS_DIV:
        case US_DIV:
        case SS_ASHIFT:
        case US_ASHIFT:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
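/* For example, (a - (b - c)) is expanded into the operand list
   {+a, -b, +c}; the entries are simplified pairwise below and the
   survivors are then rebuilt into a chain of PLUS/MINUS operations.  */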
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
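/* For example, (plus (reg) (reg)), (minus (reg) (const_int 1)), and
   (const (plus (symbol_ref "x") (const_int 4))) all satisfy the
   test above.  */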
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0)
      || CC0_P (op1))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
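/* As an illustration, comparison_result (LE, CMP_GT) is const0_rtx:
   if the operands are known to compare strictly greater, a
   less-or-equal test must be false.  */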
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
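  /* For example, (eq (reg:SI 60) (reg:SI 60)) is folded to const_true_rtx
     by the equality test above, since a register has no side effects.  */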
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (CONST_DOUBLE_AS_INT_P (trueop1))
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= GET_MODE_MASK (mode);
	  l1u &= GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l0s))
	    l0s |= ~GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l1s))
	    l1s |= ~GET_MODE_MASK (mode);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
	return comparison_result (code, CMP_EQ);

      {
	int cr;
	cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	return comparison_result (code, cr);
      }
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies
		= num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}
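      /* For example, with the bounds computed above,
	 (gtu:QI x (const_int 255)) folds to const0_rtx below: no QImode
	 value is unsigned-greater than the QImode maximum of 255.  */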
      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
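  /* For example, (ne (ior (reg) (const_int 4)) (const_int 0)) is always
     true: the IOR with a nonzero constant guarantees a nonzero result.  */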
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
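	  /* For example, for a four-element vector, sel = 5 (bits 0 and 2
	     set) takes elements 0 and 2 from op0 and elements 1 and 3
	     from op1.  */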
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
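/* For example, taking the QImode lowpart of an SImode (const_int 0x1234)
   yields (const_int 0x34) here.  */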
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
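  /* For example, the recursion above collapses
     (subreg:QI (subreg:HI (reg:SI 60) 0) 0) into (subreg:QI (reg:SI 60) 0)
     on a little-endian target.  */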
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
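  /* For example, with 4-byte SFmode parts, the CONCAT handling above
     resolves a subreg at byte offset 4 of (concat:SC re im) to the
     imaginary part IM.  */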
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
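/* For reference, the ZERO_EXTEND case above folds, e.g.,
   (subreg:SI (zero_extend:DI (reg:SI 60)) 4) to (const_int 0) on a
   little-endian target, since every bit above the source precision is
   known to be zero.  */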
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}