gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
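/* For example, HWI_SIGN_EXTEND (1) is 0 and HWI_SIGN_EXTEND (-1) is
   HOST_WIDE_INT_M1 (all bits set): the macro produces the high half that
   sign extending LOW into a (low, high) pair would require.  */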
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
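/* Example: in QImode, negating (const_int 5) yields (const_int -5) via
   gen_int_mode.  The check above catches the case where VAL == UINTVAL (I),
   i.e. I is zero or the most negative HOST_WIDE_INT, in a mode wider than a
   host wide int, and defers to simplify_const_unary_operation, which works
   in the full precision of MODE.  */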
72 /* Test whether expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
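/* Example, assuming a 32-bit SImode: mode_signbit_p (SImode, x) holds only
   when the low 32 bits of the constant X are 0x80000000, i.e. exactly the
   sign bit is set.  */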
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
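/* Example, assuming an 8-bit QImode: val_signbit_known_set_p (QImode, v)
   tests whether bit 7 of V is set, and val_signbit_known_clear_p tests the
   opposite; both return false for modes wider than HOST_BITS_PER_WIDE_INT.  */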
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
187 rtx
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208 rtx
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 poly_int64 offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 addr = strip_offset (addr, &offset);
244 if (GET_CODE (addr) == LO_SUM)
245 addr = XEXP (addr, 1);
247 /* If this is a constant pool reference, we can turn it into its
248 constant and hope that simplifications happen. */
249 if (GET_CODE (addr) == SYMBOL_REF
250 && CONSTANT_POOL_ADDRESS_P (addr))
252 c = get_pool_constant (addr);
253 cmode = get_pool_mode (addr);
255 /* If we're accessing the constant in a different mode than it was
256 originally stored, attempt to fix that up via subreg simplifications.
257 If that fails we have no choice but to return the original memory. */
258 if (known_eq (offset, 0) && cmode == GET_MODE (x))
259 return c;
260 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
262 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
263 if (tem && CONSTANT_P (tem))
264 return tem;
268 return x;
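/* For instance, if X is an SFmode MEM whose address is a constant-pool
   SYMBOL_REF holding 1.0, the CONST_DOUBLE for 1.0 is returned; with a
   mismatched mode or a nonzero offset we fall back to simplify_subreg, and
   if that fails the original MEM is returned unchanged.  */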
271 /* Simplify a MEM based on its attributes. This is the default
272 delegitimize_address target hook, and it's recommended that every
273 overrider call it. */
275 rtx
276 delegitimize_mem_from_attrs (rtx x)
278 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
279 use their base addresses as equivalent. */
280 if (MEM_P (x)
281 && MEM_EXPR (x)
282 && MEM_OFFSET_KNOWN_P (x))
284 tree decl = MEM_EXPR (x);
285 machine_mode mode = GET_MODE (x);
286 poly_int64 offset = 0;
288 switch (TREE_CODE (decl))
290 default:
291 decl = NULL;
292 break;
294 case VAR_DECL:
295 break;
297 case ARRAY_REF:
298 case ARRAY_RANGE_REF:
299 case COMPONENT_REF:
300 case BIT_FIELD_REF:
301 case REALPART_EXPR:
302 case IMAGPART_EXPR:
303 case VIEW_CONVERT_EXPR:
305 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
306 tree toffset;
307 int unsignedp, reversep, volatilep = 0;
309 decl
310 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
311 &unsignedp, &reversep, &volatilep);
312 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
313 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
314 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
315 decl = NULL;
316 else
317 offset += bytepos + toffset_val;
318 break;
322 if (decl
323 && mode == GET_MODE (x)
324 && VAR_P (decl)
325 && (TREE_STATIC (decl)
326 || DECL_THREAD_LOCAL_P (decl))
327 && DECL_RTL_SET_P (decl)
328 && MEM_P (DECL_RTL (decl)))
330 rtx newx;
332 offset += MEM_OFFSET (x);
334 newx = DECL_RTL (decl);
336 if (MEM_P (newx))
338 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
339 poly_int64 n_offset, o_offset;
341 /* Avoid creating a new MEM needlessly if we already had
342 the same address. We do so if there's no OFFSET and the
343 old address X is identical to NEWX, or if X is of the
344 form (plus NEWX OFFSET), or the NEWX is of the form
345 (plus Y (const_int Z)) and X is that with the offset
346 added: (plus Y (const_int Z+OFFSET)). */
347 n = strip_offset (n, &n_offset);
348 o = strip_offset (o, &o_offset);
349 if (!(known_eq (o_offset, n_offset + offset)
350 && rtx_equal_p (o, n)))
351 x = adjust_address_nv (newx, mode, offset);
353 else if (GET_MODE (x) == GET_MODE (newx)
354 && known_eq (offset, 0))
355 x = newx;
359 return x;
362 /* Make a unary operation by first seeing if it folds and otherwise making
363 the specified operation. */
365 rtx
366 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
367 machine_mode op_mode)
369 rtx tem;
371 /* If this simplifies, use it. */
372 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
373 return tem;
375 return gen_rtx_fmt_e (code, mode, op);
378 /* Likewise for ternary operations. */
380 rtx
381 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
382 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
384 rtx tem;
386 /* If this simplifies, use it. */
387 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
388 op0, op1, op2)) != 0)
389 return tem;
391 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
394 /* Likewise, for relational operations.
395 CMP_MODE specifies mode comparison is done in. */
397 rtx
398 simplify_gen_relational (enum rtx_code code, machine_mode mode,
399 machine_mode cmp_mode, rtx op0, rtx op1)
401 rtx tem;
403 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
404 op0, op1)) != 0)
405 return tem;
407 return gen_rtx_fmt_ee (code, mode, op0, op1);
410 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
411 and simplify the result. If FN is non-NULL, call this callback on each
412 X, if it returns non-NULL, replace X with its return value and simplify the
413 result. */
415 rtx
416 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
417 rtx (*fn) (rtx, const_rtx, void *), void *data)
419 enum rtx_code code = GET_CODE (x);
420 machine_mode mode = GET_MODE (x);
421 machine_mode op_mode;
422 const char *fmt;
423 rtx op0, op1, op2, newx, op;
424 rtvec vec, newvec;
425 int i, j;
427 if (__builtin_expect (fn != NULL, 0))
429 newx = fn (x, old_rtx, data);
430 if (newx)
431 return newx;
433 else if (rtx_equal_p (x, old_rtx))
434 return copy_rtx ((rtx) data);
436 switch (GET_RTX_CLASS (code))
438 case RTX_UNARY:
439 op0 = XEXP (x, 0);
440 op_mode = GET_MODE (op0);
441 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
442 if (op0 == XEXP (x, 0))
443 return x;
444 return simplify_gen_unary (code, mode, op0, op_mode);
446 case RTX_BIN_ARITH:
447 case RTX_COMM_ARITH:
448 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
449 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
450 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
451 return x;
452 return simplify_gen_binary (code, mode, op0, op1);
454 case RTX_COMPARE:
455 case RTX_COMM_COMPARE:
456 op0 = XEXP (x, 0);
457 op1 = XEXP (x, 1);
458 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
459 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
461 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
462 return x;
463 return simplify_gen_relational (code, mode, op_mode, op0, op1);
465 case RTX_TERNARY:
466 case RTX_BITFIELD_OPS:
467 op0 = XEXP (x, 0);
468 op_mode = GET_MODE (op0);
469 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
470 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
471 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
472 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
473 return x;
474 if (op_mode == VOIDmode)
475 op_mode = GET_MODE (op0);
476 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
478 case RTX_EXTRA:
479 if (code == SUBREG)
481 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
482 if (op0 == SUBREG_REG (x))
483 return x;
484 op0 = simplify_gen_subreg (GET_MODE (x), op0,
485 GET_MODE (SUBREG_REG (x)),
486 SUBREG_BYTE (x));
487 return op0 ? op0 : x;
489 break;
491 case RTX_OBJ:
492 if (code == MEM)
494 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
495 if (op0 == XEXP (x, 0))
496 return x;
497 return replace_equiv_address_nv (x, op0);
499 else if (code == LO_SUM)
501 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
502 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
504 /* (lo_sum (high x) y) -> y where x and y have the same base. */
505 if (GET_CODE (op0) == HIGH)
507 rtx base0, base1, offset0, offset1;
508 split_const (XEXP (op0, 0), &base0, &offset0);
509 split_const (op1, &base1, &offset1);
510 if (rtx_equal_p (base0, base1))
511 return op1;
514 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
515 return x;
516 return gen_rtx_LO_SUM (mode, op0, op1);
518 break;
520 default:
521 break;
524 newx = x;
525 fmt = GET_RTX_FORMAT (code);
526 for (i = 0; fmt[i]; i++)
527 switch (fmt[i])
529 case 'E':
530 vec = XVEC (x, i);
531 newvec = XVEC (newx, i);
532 for (j = 0; j < GET_NUM_ELEM (vec); j++)
534 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
535 old_rtx, fn, data);
536 if (op != RTVEC_ELT (vec, j))
538 if (newvec == vec)
540 newvec = shallow_copy_rtvec (vec);
541 if (x == newx)
542 newx = shallow_copy_rtx (x);
543 XVEC (newx, i) = newvec;
545 RTVEC_ELT (newvec, j) = op;
548 break;
550 case 'e':
551 if (XEXP (x, i))
553 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
554 if (op != XEXP (x, i))
556 if (x == newx)
557 newx = shallow_copy_rtx (x);
558 XEXP (newx, i) = op;
561 break;
563 return newx;
566 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
567 resulting RTX. Return a new RTX which is as simplified as possible. */
569 rtx
570 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
572 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
575 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
576 Only handle cases where the truncated value is inherently an rvalue.
578 RTL provides two ways of truncating a value:
580 1. a lowpart subreg. This form is only a truncation when both
581 the outer and inner modes (here MODE and OP_MODE respectively)
582 are scalar integers, and only then when the subreg is used as
583 an rvalue.
585 It is only valid to form such truncating subregs if the
586 truncation requires no action by the target. The onus for
587 proving this is on the creator of the subreg -- e.g. the
588 caller to simplify_subreg or simplify_gen_subreg -- and typically
589 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
591 2. a TRUNCATE. This form handles both scalar and compound integers.
593 The first form is preferred where valid. However, the TRUNCATE
594 handling in simplify_unary_operation turns the second form into the
595 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
596 so it is generally safe to form rvalue truncations using:
598 simplify_gen_unary (TRUNCATE, ...)
600 and leave simplify_unary_operation to work out which representation
601 should be used.
603 Because of the proof requirements on (1), simplify_truncation must
604 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
605 regardless of whether the outer truncation came from a SUBREG or a
606 TRUNCATE. For example, if the caller has proven that an SImode
607 truncation of:
609 (and:DI X Y)
611 is a no-op and can be represented as a subreg, it does not follow
612 that SImode truncations of X and Y are also no-ops. On a target
613 like 64-bit MIPS that requires SImode values to be stored in
614 sign-extended form, an SImode truncation of:
616 (and:DI (reg:DI X) (const_int 63))
618 is trivially a no-op because only the lower 6 bits can be set.
619 However, X is still an arbitrary 64-bit number and so we cannot
620 assume that truncating it too is a no-op. */
622 static rtx
623 simplify_truncation (machine_mode mode, rtx op,
624 machine_mode op_mode)
626 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
627 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
628 scalar_int_mode int_mode, int_op_mode, subreg_mode;
630 gcc_assert (precision <= op_precision);
632 /* Optimize truncations of zero and sign extended values. */
633 if (GET_CODE (op) == ZERO_EXTEND
634 || GET_CODE (op) == SIGN_EXTEND)
636 /* There are three possibilities. If MODE is the same as the
637 origmode, we can omit both the extension and the subreg.
638 If MODE is not larger than the origmode, we can apply the
639 truncation without the extension. Finally, if the outermode
640 is larger than the origmode, we can just extend to the appropriate
641 mode. */
642 machine_mode origmode = GET_MODE (XEXP (op, 0));
643 if (mode == origmode)
644 return XEXP (op, 0);
645 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
646 return simplify_gen_unary (TRUNCATE, mode,
647 XEXP (op, 0), origmode);
648 else
649 return simplify_gen_unary (GET_CODE (op), mode,
650 XEXP (op, 0), origmode);
653 /* If the machine can perform operations in the truncated mode, distribute
654 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
655 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
656 if (1
657 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
658 && (GET_CODE (op) == PLUS
659 || GET_CODE (op) == MINUS
660 || GET_CODE (op) == MULT))
662 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
663 if (op0)
665 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
666 if (op1)
667 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
671 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
672 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
673 the outer subreg is effectively a truncation to the original mode. */
674 if ((GET_CODE (op) == LSHIFTRT
675 || GET_CODE (op) == ASHIFTRT)
676 /* Ensure that OP_MODE is at least twice as wide as MODE
677 to avoid the possibility that an outer LSHIFTRT shifts by more
678 than the sign extension's sign_bit_copies and introduces zeros
679 into the high bits of the result. */
680 && 2 * precision <= op_precision
681 && CONST_INT_P (XEXP (op, 1))
682 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
683 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
684 && UINTVAL (XEXP (op, 1)) < precision)
685 return simplify_gen_binary (ASHIFTRT, mode,
686 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
688 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
689 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
690 the outer subreg is effectively a truncation to the original mode. */
691 if ((GET_CODE (op) == LSHIFTRT
692 || GET_CODE (op) == ASHIFTRT)
693 && CONST_INT_P (XEXP (op, 1))
694 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
695 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
696 && UINTVAL (XEXP (op, 1)) < precision)
697 return simplify_gen_binary (LSHIFTRT, mode,
698 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
700 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
701 (ashift:QI (x:QI) C), where C is a suitable small constant and
702 the outer subreg is effectively a truncation to the original mode. */
703 if (GET_CODE (op) == ASHIFT
704 && CONST_INT_P (XEXP (op, 1))
705 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
706 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
707 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
708 && UINTVAL (XEXP (op, 1)) < precision)
709 return simplify_gen_binary (ASHIFT, mode,
710 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
712 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
713 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
714 and C2. */
715 if (GET_CODE (op) == AND
716 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
717 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
718 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
719 && CONST_INT_P (XEXP (op, 1)))
721 rtx op0 = (XEXP (XEXP (op, 0), 0));
722 rtx shift_op = XEXP (XEXP (op, 0), 1);
723 rtx mask_op = XEXP (op, 1);
724 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
725 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
727 if (shift < precision
728 /* If doing this transform works for an X with all bits set,
729 it works for any X. */
730 && ((GET_MODE_MASK (mode) >> shift) & mask)
731 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
732 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
733 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
735 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
736 return simplify_gen_binary (AND, mode, op0, mask_op);
740 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
741 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
742 changing len. */
743 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
744 && REG_P (XEXP (op, 0))
745 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
746 && CONST_INT_P (XEXP (op, 1))
747 && CONST_INT_P (XEXP (op, 2)))
749 rtx op0 = XEXP (op, 0);
750 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
751 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
752 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
754 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
755 if (op0)
757 pos -= op_precision - precision;
758 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
759 XEXP (op, 1), GEN_INT (pos));
762 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
764 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
765 if (op0)
766 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
767 XEXP (op, 1), XEXP (op, 2));
771 /* Recognize a word extraction from a multi-word subreg. */
772 if ((GET_CODE (op) == LSHIFTRT
773 || GET_CODE (op) == ASHIFTRT)
774 && SCALAR_INT_MODE_P (mode)
775 && SCALAR_INT_MODE_P (op_mode)
776 && precision >= BITS_PER_WORD
777 && 2 * precision <= op_precision
778 && CONST_INT_P (XEXP (op, 1))
779 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
780 && UINTVAL (XEXP (op, 1)) < op_precision)
782 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
783 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
784 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
785 (WORDS_BIG_ENDIAN
786 ? byte - shifted_bytes
787 : byte + shifted_bytes));
790 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
791 and try replacing the TRUNCATE and shift with it. Don't do this
792 if the MEM has a mode-dependent address. */
793 if ((GET_CODE (op) == LSHIFTRT
794 || GET_CODE (op) == ASHIFTRT)
795 && is_a <scalar_int_mode> (mode, &int_mode)
796 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
797 && MEM_P (XEXP (op, 0))
798 && CONST_INT_P (XEXP (op, 1))
799 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
800 && INTVAL (XEXP (op, 1)) > 0
801 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
802 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
803 MEM_ADDR_SPACE (XEXP (op, 0)))
804 && ! MEM_VOLATILE_P (XEXP (op, 0))
805 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
806 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
808 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
809 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
810 return adjust_address_nv (XEXP (op, 0), int_mode,
811 (WORDS_BIG_ENDIAN
812 ? byte - shifted_bytes
813 : byte + shifted_bytes));
816 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
817 (OP:SI foo:SI) if OP is NEG or ABS. */
818 if ((GET_CODE (op) == ABS
819 || GET_CODE (op) == NEG)
820 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
821 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
822 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
823 return simplify_gen_unary (GET_CODE (op), mode,
824 XEXP (XEXP (op, 0), 0), mode);
826 /* (truncate:A (subreg:B (truncate:C X) 0)) is
827 (truncate:A X). */
828 if (GET_CODE (op) == SUBREG
829 && is_a <scalar_int_mode> (mode, &int_mode)
830 && SCALAR_INT_MODE_P (op_mode)
831 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
832 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
833 && subreg_lowpart_p (op))
835 rtx inner = XEXP (SUBREG_REG (op), 0);
836 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
837 return simplify_gen_unary (TRUNCATE, int_mode, inner,
838 GET_MODE (inner));
839 else
840 /* If subreg above is paradoxical and C is narrower
841 than A, return (subreg:A (truncate:C X) 0). */
842 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
845 /* (truncate:A (truncate:B X)) is (truncate:A X). */
846 if (GET_CODE (op) == TRUNCATE)
847 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
848 GET_MODE (XEXP (op, 0)));
850 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
851 in mode A. */
852 if (GET_CODE (op) == IOR
853 && SCALAR_INT_MODE_P (mode)
854 && SCALAR_INT_MODE_P (op_mode)
855 && CONST_INT_P (XEXP (op, 1))
856 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
857 return constm1_rtx;
859 return NULL_RTX;
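/* A simple instance of the extension case at the top of this function:
   the SImode truncation of (sign_extend:DI (reg:SI r)) is just (reg:SI r),
   because MODE equals the mode of the extension's operand.  */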
862 /* Try to simplify a unary operation CODE whose output mode is to be
863 MODE with input operand OP whose mode was originally OP_MODE.
864 Return zero if no simplification can be made. */
865 rtx
866 simplify_unary_operation (enum rtx_code code, machine_mode mode,
867 rtx op, machine_mode op_mode)
869 rtx trueop, tem;
871 trueop = avoid_constant_pool_reference (op);
873 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
874 if (tem)
875 return tem;
877 return simplify_unary_operation_1 (code, mode, op);
880 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
881 to be exact. */
883 static bool
884 exact_int_to_float_conversion_p (const_rtx op)
886 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
887 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
888 /* Constants shouldn't reach here. */
889 gcc_assert (op0_mode != VOIDmode);
890 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
891 int in_bits = in_prec;
892 if (HWI_COMPUTABLE_MODE_P (op0_mode))
894 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
895 if (GET_CODE (op) == FLOAT)
896 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
897 else if (GET_CODE (op) == UNSIGNED_FLOAT)
898 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
899 else
900 gcc_unreachable ();
901 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
903 return in_bits <= out_bits;
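/* Example, assuming IEEE single precision for SFmode (24 significand bits):
   a FLOAT of an SImode value whose nonzero bits fit in 24 bits converts
   exactly, so IN_BITS <= OUT_BITS and the function returns true.  */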
906 /* Perform some simplifications we can do even if the operands
907 aren't constant. */
908 static rtx
909 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
911 enum rtx_code reversed;
912 rtx temp, elt, base, step;
913 scalar_int_mode inner, int_mode, op_mode, op0_mode;
915 switch (code)
917 case NOT:
918 /* (not (not X)) == X. */
919 if (GET_CODE (op) == NOT)
920 return XEXP (op, 0);
922 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
923 comparison is all ones. */
924 if (COMPARISON_P (op)
925 && (mode == BImode || STORE_FLAG_VALUE == -1)
926 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
927 return simplify_gen_relational (reversed, mode, VOIDmode,
928 XEXP (op, 0), XEXP (op, 1));
930 /* (not (plus X -1)) can become (neg X). */
931 if (GET_CODE (op) == PLUS
932 && XEXP (op, 1) == constm1_rtx)
933 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
935 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
936 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
937 and MODE_VECTOR_INT. */
938 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
939 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
940 CONSTM1_RTX (mode));
942 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
943 if (GET_CODE (op) == XOR
944 && CONST_INT_P (XEXP (op, 1))
945 && (temp = simplify_unary_operation (NOT, mode,
946 XEXP (op, 1), mode)) != 0)
947 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
949 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
950 if (GET_CODE (op) == PLUS
951 && CONST_INT_P (XEXP (op, 1))
952 && mode_signbit_p (mode, XEXP (op, 1))
953 && (temp = simplify_unary_operation (NOT, mode,
954 XEXP (op, 1), mode)) != 0)
955 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
958 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
959 operands other than 1, but that is not valid. We could do a
960 similar simplification for (not (lshiftrt C X)) where C is
961 just the sign bit, but this doesn't seem common enough to
962 bother with. */
963 if (GET_CODE (op) == ASHIFT
964 && XEXP (op, 0) == const1_rtx)
966 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
967 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
970 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
971 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
972 so we can perform the above simplification. */
973 if (STORE_FLAG_VALUE == -1
974 && is_a <scalar_int_mode> (mode, &int_mode)
975 && GET_CODE (op) == ASHIFTRT
976 && CONST_INT_P (XEXP (op, 1))
977 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
978 return simplify_gen_relational (GE, int_mode, VOIDmode,
979 XEXP (op, 0), const0_rtx);
982 if (partial_subreg_p (op)
983 && subreg_lowpart_p (op)
984 && GET_CODE (SUBREG_REG (op)) == ASHIFT
985 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
987 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
988 rtx x;
990 x = gen_rtx_ROTATE (inner_mode,
991 simplify_gen_unary (NOT, inner_mode, const1_rtx,
992 inner_mode),
993 XEXP (SUBREG_REG (op), 1));
994 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
995 if (temp)
996 return temp;
999 /* Apply De Morgan's laws to reduce number of patterns for machines
1000 with negating logical insns (and-not, nand, etc.). If result has
1001 only one NOT, put it first, since that is how the patterns are
1002 coded. */
1003 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1005 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1006 machine_mode op_mode;
1008 op_mode = GET_MODE (in1);
1009 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1011 op_mode = GET_MODE (in2);
1012 if (op_mode == VOIDmode)
1013 op_mode = mode;
1014 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1016 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1017 std::swap (in1, in2);
1019 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1020 mode, in1, in2);
1023 /* (not (bswap x)) -> (bswap (not x)). */
1024 if (GET_CODE (op) == BSWAP)
1026 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1027 return simplify_gen_unary (BSWAP, mode, x, mode);
1029 break;
1031 case NEG:
1032 /* (neg (neg X)) == X. */
1033 if (GET_CODE (op) == NEG)
1034 return XEXP (op, 0);
1036 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1037 If comparison is not reversible use
1038 x ? y : (neg y). */
1039 if (GET_CODE (op) == IF_THEN_ELSE)
1041 rtx cond = XEXP (op, 0);
1042 rtx true_rtx = XEXP (op, 1);
1043 rtx false_rtx = XEXP (op, 2);
1045 if ((GET_CODE (true_rtx) == NEG
1046 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1047 || (GET_CODE (false_rtx) == NEG
1048 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1050 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1051 temp = reversed_comparison (cond, mode);
1052 else
1054 temp = cond;
1055 std::swap (true_rtx, false_rtx);
1057 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1058 mode, temp, true_rtx, false_rtx);
1062 /* (neg (plus X 1)) can become (not X). */
1063 if (GET_CODE (op) == PLUS
1064 && XEXP (op, 1) == const1_rtx)
1065 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1067 /* Similarly, (neg (not X)) is (plus X 1). */
1068 if (GET_CODE (op) == NOT)
1069 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1070 CONST1_RTX (mode));
1072 /* (neg (minus X Y)) can become (minus Y X). This transformation
1073 isn't safe for modes with signed zeros, since if X and Y are
1074 both +0, (minus Y X) is the same as (minus X Y). If the
1075 rounding mode is towards +infinity (or -infinity) then the two
1076 expressions will be rounded differently. */
1077 if (GET_CODE (op) == MINUS
1078 && !HONOR_SIGNED_ZEROS (mode)
1079 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1080 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1082 if (GET_CODE (op) == PLUS
1083 && !HONOR_SIGNED_ZEROS (mode)
1084 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 /* (neg (plus A C)) is simplified to (minus -C A). */
1087 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1088 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1090 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1091 if (temp)
1092 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1095 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1096 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1097 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1100 /* (neg (mult A B)) becomes (mult A (neg B)).
1101 This works even for floating-point values. */
1102 if (GET_CODE (op) == MULT
1103 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1105 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1106 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1109 /* NEG commutes with ASHIFT since it is multiplication. Only do
1110 this if we can then eliminate the NEG (e.g., if the operand
1111 is a constant). */
1112 if (GET_CODE (op) == ASHIFT)
1114 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1115 if (temp)
1116 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1119 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1120 C is equal to the width of MODE minus 1. */
1121 if (GET_CODE (op) == ASHIFTRT
1122 && CONST_INT_P (XEXP (op, 1))
1123 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1124 return simplify_gen_binary (LSHIFTRT, mode,
1125 XEXP (op, 0), XEXP (op, 1));
1127 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1128 C is equal to the width of MODE minus 1. */
1129 if (GET_CODE (op) == LSHIFTRT
1130 && CONST_INT_P (XEXP (op, 1))
1131 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1132 return simplify_gen_binary (ASHIFTRT, mode,
1133 XEXP (op, 0), XEXP (op, 1));
1135 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1136 if (GET_CODE (op) == XOR
1137 && XEXP (op, 1) == const1_rtx
1138 && nonzero_bits (XEXP (op, 0), mode) == 1)
1139 return plus_constant (mode, XEXP (op, 0), -1);
1141 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1142 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1143 if (GET_CODE (op) == LT
1144 && XEXP (op, 1) == const0_rtx
1145 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1147 int_mode = as_a <scalar_int_mode> (mode);
1148 int isize = GET_MODE_PRECISION (inner);
1149 if (STORE_FLAG_VALUE == 1)
1151 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1152 gen_int_shift_amount (inner,
1153 isize - 1));
1154 if (int_mode == inner)
1155 return temp;
1156 if (GET_MODE_PRECISION (int_mode) > isize)
1157 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1158 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1160 else if (STORE_FLAG_VALUE == -1)
1162 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1163 gen_int_shift_amount (inner,
1164 isize - 1));
1165 if (int_mode == inner)
1166 return temp;
1167 if (GET_MODE_PRECISION (int_mode) > isize)
1168 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1169 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1173 if (vec_series_p (op, &base, &step))
1175 /* Only create a new series if we can simplify both parts. In other
1176 cases this isn't really a simplification, and it's not necessarily
1177 a win to replace a vector operation with a scalar operation. */
1178 scalar_mode inner_mode = GET_MODE_INNER (mode);
1179 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1180 if (base)
1182 step = simplify_unary_operation (NEG, inner_mode,
1183 step, inner_mode);
1184 if (step)
1185 return gen_vec_series (mode, base, step);
1188 break;
1190 case TRUNCATE:
1191 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1192 with the umulXi3_highpart patterns. */
1193 if (GET_CODE (op) == LSHIFTRT
1194 && GET_CODE (XEXP (op, 0)) == MULT)
1195 break;
1197 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1199 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1201 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1202 if (temp)
1203 return temp;
1205 /* We can't handle truncation to a partial integer mode here
1206 because we don't know the real bitsize of the partial
1207 integer mode. */
1208 break;
1211 if (GET_MODE (op) != VOIDmode)
1213 temp = simplify_truncation (mode, op, GET_MODE (op));
1214 if (temp)
1215 return temp;
1218 /* If we know that the value is already truncated, we can
1219 replace the TRUNCATE with a SUBREG. */
1220 if (known_eq (GET_MODE_NUNITS (mode), 1)
1221 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1222 || truncated_to_mode (mode, op)))
1224 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1225 if (temp)
1226 return temp;
1229 /* A truncate of a comparison can be replaced with a subreg if
1230 STORE_FLAG_VALUE permits. This is like the previous test,
1231 but it works even if the comparison is done in a mode larger
1232 than HOST_BITS_PER_WIDE_INT. */
1233 if (HWI_COMPUTABLE_MODE_P (mode)
1234 && COMPARISON_P (op)
1235 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1237 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1238 if (temp)
1239 return temp;
1242 /* A truncate of a memory is just loading the low part of the memory
1243 if we are not changing the meaning of the address. */
1244 if (GET_CODE (op) == MEM
1245 && !VECTOR_MODE_P (mode)
1246 && !MEM_VOLATILE_P (op)
1247 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1249 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1250 if (temp)
1251 return temp;
1254 break;
1256 case FLOAT_TRUNCATE:
1257 if (DECIMAL_FLOAT_MODE_P (mode))
1258 break;
1260 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1261 if (GET_CODE (op) == FLOAT_EXTEND
1262 && GET_MODE (XEXP (op, 0)) == mode)
1263 return XEXP (op, 0);
1265 /* (float_truncate:SF (float_truncate:DF foo:XF))
1266 = (float_truncate:SF foo:XF).
1267 This may eliminate double rounding, so it is unsafe.
1269 (float_truncate:SF (float_extend:XF foo:DF))
1270 = (float_truncate:SF foo:DF).
1272 (float_truncate:DF (float_extend:XF foo:SF))
1273 = (float_extend:DF foo:SF). */
1274 if ((GET_CODE (op) == FLOAT_TRUNCATE
1275 && flag_unsafe_math_optimizations)
1276 || GET_CODE (op) == FLOAT_EXTEND)
1277 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1278 > GET_MODE_UNIT_SIZE (mode)
1279 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1280 mode,
1281 XEXP (op, 0), mode);
1283 /* (float_truncate (float x)) is (float x) */
1284 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1285 && (flag_unsafe_math_optimizations
1286 || exact_int_to_float_conversion_p (op)))
1287 return simplify_gen_unary (GET_CODE (op), mode,
1288 XEXP (op, 0),
1289 GET_MODE (XEXP (op, 0)));
1291 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1292 (OP:SF foo:SF) if OP is NEG or ABS. */
1293 if ((GET_CODE (op) == ABS
1294 || GET_CODE (op) == NEG)
1295 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1296 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1297 return simplify_gen_unary (GET_CODE (op), mode,
1298 XEXP (XEXP (op, 0), 0), mode);
1300 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1301 is (float_truncate:SF x). */
1302 if (GET_CODE (op) == SUBREG
1303 && subreg_lowpart_p (op)
1304 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1305 return SUBREG_REG (op);
1306 break;
1308 case FLOAT_EXTEND:
1309 if (DECIMAL_FLOAT_MODE_P (mode))
1310 break;
1312 /* (float_extend (float_extend x)) is (float_extend x)
1314 (float_extend (float x)) is (float x) assuming that double
1315 rounding can't happen.
1316 */
1317 if (GET_CODE (op) == FLOAT_EXTEND
1318 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1319 && exact_int_to_float_conversion_p (op)))
1320 return simplify_gen_unary (GET_CODE (op), mode,
1321 XEXP (op, 0),
1322 GET_MODE (XEXP (op, 0)));
1324 break;
1326 case ABS:
1327 /* (abs (neg <foo>)) -> (abs <foo>) */
1328 if (GET_CODE (op) == NEG)
1329 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1330 GET_MODE (XEXP (op, 0)));
1332 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1333 do nothing. */
1334 if (GET_MODE (op) == VOIDmode)
1335 break;
1337 /* If operand is something known to be positive, ignore the ABS. */
1338 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1339 || val_signbit_known_clear_p (GET_MODE (op),
1340 nonzero_bits (op, GET_MODE (op))))
1341 return op;
1343 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1344 if (is_a <scalar_int_mode> (mode, &int_mode)
1345 && (num_sign_bit_copies (op, int_mode)
1346 == GET_MODE_PRECISION (int_mode)))
1347 return gen_rtx_NEG (int_mode, op);
1349 break;
1351 case FFS:
1352 /* (ffs (*_extend <X>)) = (ffs <X>) */
1353 if (GET_CODE (op) == SIGN_EXTEND
1354 || GET_CODE (op) == ZERO_EXTEND)
1355 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1356 GET_MODE (XEXP (op, 0)));
1357 break;
1359 case POPCOUNT:
1360 switch (GET_CODE (op))
1362 case BSWAP:
1363 case ZERO_EXTEND:
1364 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1365 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1366 GET_MODE (XEXP (op, 0)));
1368 case ROTATE:
1369 case ROTATERT:
1370 /* Rotations don't affect popcount. */
1371 if (!side_effects_p (XEXP (op, 1)))
1372 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1373 GET_MODE (XEXP (op, 0)));
1374 break;
1376 default:
1377 break;
1379 break;
1381 case PARITY:
1382 switch (GET_CODE (op))
1384 case NOT:
1385 case BSWAP:
1386 case ZERO_EXTEND:
1387 case SIGN_EXTEND:
1388 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1389 GET_MODE (XEXP (op, 0)));
1391 case ROTATE:
1392 case ROTATERT:
1393 /* Rotations don't affect parity. */
1394 if (!side_effects_p (XEXP (op, 1)))
1395 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1396 GET_MODE (XEXP (op, 0)));
1397 break;
1399 default:
1400 break;
1402 break;
1404 case BSWAP:
1405 /* (bswap (bswap x)) -> x. */
1406 if (GET_CODE (op) == BSWAP)
1407 return XEXP (op, 0);
1408 break;
1410 case FLOAT:
1411 /* (float (sign_extend <X>)) = (float <X>). */
1412 if (GET_CODE (op) == SIGN_EXTEND)
1413 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1414 GET_MODE (XEXP (op, 0)));
1415 break;
1417 case SIGN_EXTEND:
1418 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1419 becomes just the MINUS if its mode is MODE. This allows
1420 folding switch statements on machines using casesi (such as
1421 the VAX). */
1422 if (GET_CODE (op) == TRUNCATE
1423 && GET_MODE (XEXP (op, 0)) == mode
1424 && GET_CODE (XEXP (op, 0)) == MINUS
1425 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1426 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1427 return XEXP (op, 0);
1429 /* Extending a widening multiplication should be canonicalized to
1430 a wider widening multiplication. */
1431 if (GET_CODE (op) == MULT)
1433 rtx lhs = XEXP (op, 0);
1434 rtx rhs = XEXP (op, 1);
1435 enum rtx_code lcode = GET_CODE (lhs);
1436 enum rtx_code rcode = GET_CODE (rhs);
1438 /* Widening multiplies usually extend both operands, but sometimes
1439 they use a shift to extract a portion of a register. */
1440 if ((lcode == SIGN_EXTEND
1441 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1442 && (rcode == SIGN_EXTEND
1443 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1445 machine_mode lmode = GET_MODE (lhs);
1446 machine_mode rmode = GET_MODE (rhs);
1447 int bits;
1449 if (lcode == ASHIFTRT)
1450 /* Number of bits not shifted off the end. */
1451 bits = (GET_MODE_UNIT_PRECISION (lmode)
1452 - INTVAL (XEXP (lhs, 1)));
1453 else /* lcode == SIGN_EXTEND */
1454 /* Size of inner mode. */
1455 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1457 if (rcode == ASHIFTRT)
1458 bits += (GET_MODE_UNIT_PRECISION (rmode)
1459 - INTVAL (XEXP (rhs, 1)));
1460 else /* rcode == SIGN_EXTEND */
1461 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1463 /* We can only widen multiplies if the result is mathematically
1464 equivalent. I.e. if overflow was impossible. */
1465 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1466 return simplify_gen_binary
1467 (MULT, mode,
1468 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1469 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1473 /* Check for a sign extension of a subreg of a promoted
1474 variable, where the promotion is sign-extended, and the
1475 target mode is the same as the variable's promotion. */
1476 if (GET_CODE (op) == SUBREG
1477 && SUBREG_PROMOTED_VAR_P (op)
1478 && SUBREG_PROMOTED_SIGNED_P (op)
1479 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1481 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1482 if (temp)
1483 return temp;
1486 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1487 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1488 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1490 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1491 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1492 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1493 GET_MODE (XEXP (op, 0)));
1496 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1497 is (sign_extend:M (subreg:O <X>)) if there is mode with
1498 GET_MODE_BITSIZE (N) - I bits.
1499 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1500 is similarly (zero_extend:M (subreg:O <X>)). */
1501 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1502 && GET_CODE (XEXP (op, 0)) == ASHIFT
1503 && is_a <scalar_int_mode> (mode, &int_mode)
1504 && CONST_INT_P (XEXP (op, 1))
1505 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1506 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1507 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1509 scalar_int_mode tmode;
1510 gcc_assert (GET_MODE_BITSIZE (int_mode)
1511 > GET_MODE_BITSIZE (op_mode));
1512 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1513 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1515 rtx inner =
1516 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1517 if (inner)
1518 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1519 ? SIGN_EXTEND : ZERO_EXTEND,
1520 int_mode, inner, tmode);
1524 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1525 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1526 if (GET_CODE (op) == LSHIFTRT
1527 && CONST_INT_P (XEXP (op, 1))
1528 && XEXP (op, 1) != const0_rtx)
1529 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1531 #if defined(POINTERS_EXTEND_UNSIGNED)
1532 /* As we do not know which address space the pointer is referring to,
1533 we can do this only if the target does not support different pointer
1534 or address modes depending on the address space. */
1535 if (target_default_pointer_address_modes_p ()
1536 && ! POINTERS_EXTEND_UNSIGNED
1537 && mode == Pmode && GET_MODE (op) == ptr_mode
1538 && (CONSTANT_P (op)
1539 || (GET_CODE (op) == SUBREG
1540 && REG_P (SUBREG_REG (op))
1541 && REG_POINTER (SUBREG_REG (op))
1542 && GET_MODE (SUBREG_REG (op)) == Pmode))
1543 && !targetm.have_ptr_extend ())
1545 temp
1546 = convert_memory_address_addr_space_1 (Pmode, op,
1547 ADDR_SPACE_GENERIC, false,
1548 true);
1549 if (temp)
1550 return temp;
1552 #endif
1553 break;
1555 case ZERO_EXTEND:
1556 /* Check for a zero extension of a subreg of a promoted
1557 variable, where the promotion is zero-extended, and the
1558 target mode is the same as the variable's promotion. */
1559 if (GET_CODE (op) == SUBREG
1560 && SUBREG_PROMOTED_VAR_P (op)
1561 && SUBREG_PROMOTED_UNSIGNED_P (op)
1562 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1564 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1565 if (temp)
1566 return temp;
1569 /* Extending a widening multiplication should be canonicalized to
1570 a wider widening multiplication. */
1571 if (GET_CODE (op) == MULT)
1573 rtx lhs = XEXP (op, 0);
1574 rtx rhs = XEXP (op, 1);
1575 enum rtx_code lcode = GET_CODE (lhs);
1576 enum rtx_code rcode = GET_CODE (rhs);
1578 /* Widening multiplies usually extend both operands, but sometimes
1579 they use a shift to extract a portion of a register. */
1580 if ((lcode == ZERO_EXTEND
1581 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1582 && (rcode == ZERO_EXTEND
1583 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1585 machine_mode lmode = GET_MODE (lhs);
1586 machine_mode rmode = GET_MODE (rhs);
1587 int bits;
1589 if (lcode == LSHIFTRT)
1590 /* Number of bits not shifted off the end. */
1591 bits = (GET_MODE_UNIT_PRECISION (lmode)
1592 - INTVAL (XEXP (lhs, 1)));
1593 else /* lcode == ZERO_EXTEND */
1594 /* Size of inner mode. */
1595 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1597 if (rcode == LSHIFTRT)
1598 bits += (GET_MODE_UNIT_PRECISION (rmode)
1599 - INTVAL (XEXP (rhs, 1)));
1600 else /* rcode == ZERO_EXTEND */
1601 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1603 /* We can only widen multiplies if the result is mathematically
1604 equivalent. I.e. if overflow was impossible. */
1605 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1606 return simplify_gen_binary
1607 (MULT, mode,
1608 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1609 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1613 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1614 if (GET_CODE (op) == ZERO_EXTEND)
1615 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1616 GET_MODE (XEXP (op, 0)));
1618 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1619 is (zero_extend:M (subreg:O <X>)) if there is mode with
1620 GET_MODE_PRECISION (N) - I bits. */
1621 if (GET_CODE (op) == LSHIFTRT
1622 && GET_CODE (XEXP (op, 0)) == ASHIFT
1623 && is_a <scalar_int_mode> (mode, &int_mode)
1624 && CONST_INT_P (XEXP (op, 1))
1625 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1626 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1627 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1629 scalar_int_mode tmode;
1630 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1631 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1633 rtx inner =
1634 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1635 if (inner)
1636 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1637 inner, tmode);
1641 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1642 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1643 of mode N. E.g.
1644 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1645 (and:SI (reg:SI) (const_int 63)). */
1646 if (partial_subreg_p (op)
1647 && is_a <scalar_int_mode> (mode, &int_mode)
1648 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1649 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1650 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1651 && subreg_lowpart_p (op)
1652 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1653 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1655 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1656 return SUBREG_REG (op);
1657 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1658 op0_mode);
1661 #if defined(POINTERS_EXTEND_UNSIGNED)
1662 /* As we do not know which address space the pointer is referring to,
1663 we can do this only if the target does not support different pointer
1664 or address modes depending on the address space. */
1665 if (target_default_pointer_address_modes_p ()
1666 && POINTERS_EXTEND_UNSIGNED > 0
1667 && mode == Pmode && GET_MODE (op) == ptr_mode
1668 && (CONSTANT_P (op)
1669 || (GET_CODE (op) == SUBREG
1670 && REG_P (SUBREG_REG (op))
1671 && REG_POINTER (SUBREG_REG (op))
1672 && GET_MODE (SUBREG_REG (op)) == Pmode))
1673 && !targetm.have_ptr_extend ())
1675 temp
1676 = convert_memory_address_addr_space_1 (Pmode, op,
1677 ADDR_SPACE_GENERIC, false,
1678 true);
1679 if (temp)
1680 return temp;
1682 #endif
1683 break;
1685 default:
1686 break;
1689 if (VECTOR_MODE_P (mode)
1690 && vec_duplicate_p (op, &elt)
1691 && code != VEC_DUPLICATE)
1693 /* Try applying the operator to ELT and see if that simplifies.
1694 We can duplicate the result if so.
1696 The reason we don't use simplify_gen_unary is that it isn't
1697 necessarily a win to convert things like:
1699 (neg:V (vec_duplicate:V (reg:S R)))
1701 to
1703 (vec_duplicate:V (neg:S (reg:S R)))
1705 The first might be done entirely in vector registers while the
1706 second might need a move between register files. */
1707 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1708 elt, GET_MODE_INNER (GET_MODE (op)));
1709 if (temp)
1710 return gen_vec_duplicate (mode, temp);
1713 return 0;
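/* For the vec_duplicate case just above: if ELT is (const_int 5) and CODE
   is NEG, the scalar negation simplifies to (const_int -5), so the result
   is that constant duplicated across MODE; if the scalar operation does not
   simplify, no vector result is produced.  */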
1716 /* Try to compute the value of a unary operation CODE whose output mode is to
1717 be MODE with input operand OP whose mode was originally OP_MODE.
1718 Return zero if the value cannot be computed. */
1719 rtx
1720 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1721 rtx op, machine_mode op_mode)
1723 scalar_int_mode result_mode;
1725 if (code == VEC_DUPLICATE)
1727 gcc_assert (VECTOR_MODE_P (mode));
1728 if (GET_MODE (op) != VOIDmode)
1730 if (!VECTOR_MODE_P (GET_MODE (op)))
1731 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1732 else
1733 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1734 (GET_MODE (op)));
1736 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1737 return gen_const_vec_duplicate (mode, op);
1738 unsigned int n_elts;
1739 if (GET_CODE (op) == CONST_VECTOR
1740 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
1742 /* This must be constant if we're duplicating it to a constant
1743 number of elements. */
1744 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op).to_constant ();
1745 gcc_assert (in_n_elts < n_elts);
1746 gcc_assert ((n_elts % in_n_elts) == 0);
1747 rtvec v = rtvec_alloc (n_elts);
1748 for (unsigned i = 0; i < n_elts; i++)
1749 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1750 return gen_rtx_CONST_VECTOR (mode, v);
1754 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1756 unsigned int n_elts;
1757 if (!CONST_VECTOR_NUNITS (op).is_constant (&n_elts))
1758 return NULL_RTX;
1760 machine_mode opmode = GET_MODE (op);
1761 gcc_assert (known_eq (GET_MODE_NUNITS (mode), n_elts));
1762 gcc_assert (known_eq (GET_MODE_NUNITS (opmode), n_elts));
1764 rtvec v = rtvec_alloc (n_elts);
1765 unsigned int i;
1767 for (i = 0; i < n_elts; i++)
1769 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1770 CONST_VECTOR_ELT (op, i),
1771 GET_MODE_INNER (opmode));
1772 if (!x || !valid_for_const_vector_p (mode, x))
1773 return 0;
1774 RTVEC_ELT (v, i) = x;
1776 return gen_rtx_CONST_VECTOR (mode, v);
1779 /* The order of these tests is critical so that, for example, we don't
1780 check the wrong mode (input vs. output) for a conversion operation,
1781 such as FIX. At some point, this should be simplified. */
1783 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1785 REAL_VALUE_TYPE d;
1787 if (op_mode == VOIDmode)
1789 /* A CONST_INT has VOIDmode as its mode. We assume that all
1790 the bits of the constant are significant; this is a
1791 dangerous assumption, though, as CONST_INTs are often
1792 created and used with garbage in the bits outside the
1793 precision of the implied mode of the const_int. */
1794 op_mode = MAX_MODE_INT;
1797 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1799 /* Avoid the folding if flag_signaling_nans is on and
1800 operand is a signaling NaN. */
1801 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1802 return 0;
1804 d = real_value_truncate (mode, d);
1805 return const_double_from_real_value (d, mode);
1807 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1809 REAL_VALUE_TYPE d;
1811 if (op_mode == VOIDmode)
1813 /* A CONST_INT has VOIDmode as its mode. We assume that all
1814 the bits of the constant are significant; this is a
1815 dangerous assumption, though, as CONST_INTs are often
1816 created and used with garbage in the bits outside the
1817 precision of the implied mode of the const_int. */
1818 op_mode = MAX_MODE_INT;
1821 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1823 /* Avoid the folding if flag_signaling_nans is on and
1824 operand is a signaling NaN. */
1825 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1826 return 0;
1828 d = real_value_truncate (mode, d);
1829 return const_double_from_real_value (d, mode);
1832 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1834 unsigned int width = GET_MODE_PRECISION (result_mode);
1835 wide_int result;
1836 scalar_int_mode imode = (op_mode == VOIDmode
1837 ? result_mode
1838 : as_a <scalar_int_mode> (op_mode));
1839 rtx_mode_t op0 = rtx_mode_t (op, imode);
1840 int int_value;
1842 #if TARGET_SUPPORTS_WIDE_INT == 0
1843 /* This assert keeps the simplification from producing a result
1844 that cannot be represented in a CONST_DOUBLE. A lot of
1845 upstream callers expect that this function never fails to
1846 simplify something, so if this check were added to the test
1847 above, the code would just die later anyway. If this assert
1848 triggers, you simply need to make the port support wide int. */
1849 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1850 #endif
1852 switch (code)
1854 case NOT:
1855 result = wi::bit_not (op0);
1856 break;
1858 case NEG:
1859 result = wi::neg (op0);
1860 break;
1862 case ABS:
1863 result = wi::abs (op0);
1864 break;
1866 case FFS:
1867 result = wi::shwi (wi::ffs (op0), result_mode);
1868 break;
1870 case CLZ:
1871 if (wi::ne_p (op0, 0))
1872 int_value = wi::clz (op0);
1873 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1874 return NULL_RTX;
1875 result = wi::shwi (int_value, result_mode);
1876 break;
1878 case CLRSB:
1879 result = wi::shwi (wi::clrsb (op0), result_mode);
1880 break;
1882 case CTZ:
1883 if (wi::ne_p (op0, 0))
1884 int_value = wi::ctz (op0);
1885 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1886 return NULL_RTX;
1887 result = wi::shwi (int_value, result_mode);
1888 break;
1890 case POPCOUNT:
1891 result = wi::shwi (wi::popcount (op0), result_mode);
1892 break;
1894 case PARITY:
1895 result = wi::shwi (wi::parity (op0), result_mode);
1896 break;
1898 case BSWAP:
1899 result = wide_int (op0).bswap ();
1900 break;
1902 case TRUNCATE:
1903 case ZERO_EXTEND:
1904 result = wide_int::from (op0, width, UNSIGNED);
1905 break;
1907 case SIGN_EXTEND:
1908 result = wide_int::from (op0, width, SIGNED);
1909 break;
1911 case SQRT:
1912 default:
1913 return 0;
1916 return immed_wide_int_const (result, result_mode);
1919 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1920 && SCALAR_FLOAT_MODE_P (mode)
1921 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1923 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1924 switch (code)
1926 case SQRT:
1927 return 0;
1928 case ABS:
1929 d = real_value_abs (&d);
1930 break;
1931 case NEG:
1932 d = real_value_negate (&d);
1933 break;
1934 case FLOAT_TRUNCATE:
1935 /* Don't perform the operation if flag_signaling_nans is on
1936 and the operand is a signaling NaN. */
1937 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1938 return NULL_RTX;
1939 d = real_value_truncate (mode, d);
1940 break;
1941 case FLOAT_EXTEND:
1942 /* Don't perform the operation if flag_signaling_nans is on
1943 and the operand is a signaling NaN. */
1944 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1945 return NULL_RTX;
1946 /* All this does is change the mode, unless changing
1947 mode class. */
1948 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1949 real_convert (&d, mode, &d);
1950 break;
1951 case FIX:
1952 /* Don't perform the operation if flag_signaling_nans is on
1953 and the operand is a signaling NaN. */
1954 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1955 return NULL_RTX;
1956 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1957 break;
1958 case NOT:
1960 long tmp[4];
1961 int i;
1963 real_to_target (tmp, &d, GET_MODE (op));
1964 for (i = 0; i < 4; i++)
1965 tmp[i] = ~tmp[i];
1966 real_from_target (&d, tmp, mode);
1967 break;
1969 default:
1970 gcc_unreachable ();
1972 return const_double_from_real_value (d, mode);
1974 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1975 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1976 && is_int_mode (mode, &result_mode))
1978 unsigned int width = GET_MODE_PRECISION (result_mode);
1979 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1980 operators are intentionally left unspecified (to ease implementation
1981 by target backends), for consistency, this routine implements the
1982 same semantics for constant folding as used by the middle-end. */
1984 /* This was formerly used only for non-IEEE float.
1985 eggert@twinsun.com says it is safe for IEEE also. */
1986 REAL_VALUE_TYPE t;
1987 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1988 wide_int wmax, wmin;
1989 /* This is part of the ABI of real_to_integer, but we check
1990 things before making this call. */
1991 bool fail;
1993 switch (code)
1995 case FIX:
1996 if (REAL_VALUE_ISNAN (*x))
1997 return const0_rtx;
1999 /* Test against the signed upper bound. */
2000 wmax = wi::max_value (width, SIGNED);
2001 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2002 if (real_less (&t, x))
2003 return immed_wide_int_const (wmax, mode);
2005 /* Test against the signed lower bound. */
2006 wmin = wi::min_value (width, SIGNED);
2007 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2008 if (real_less (x, &t))
2009 return immed_wide_int_const (wmin, mode);
2011 return immed_wide_int_const (real_to_integer (x, &fail, width),
2012 mode);
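/* An illustration of the saturation above, assuming a 32-bit SImode:
   folding FIX of the DFmode constant 3.5e9 to SImode finds that it
   exceeds the signed upper bound 2147483647.0, so the result is
   (const_int 2147483647); values below the signed lower bound clamp
   to -2147483648 instead.  */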
2014 case UNSIGNED_FIX:
2015 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2016 return const0_rtx;
2018 /* Test against the unsigned upper bound. */
2019 wmax = wi::max_value (width, UNSIGNED);
2020 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2021 if (real_less (&t, x))
2022 return immed_wide_int_const (wmax, mode);
2024 return immed_wide_int_const (real_to_integer (x, &fail, width),
2025 mode);
2027 default:
2028 gcc_unreachable ();
2032 /* Handle polynomial integers. */
2033 else if (CONST_POLY_INT_P (op))
2035 poly_wide_int result;
2036 switch (code)
2038 case NEG:
2039 result = -const_poly_int_value (op);
2040 break;
2042 case NOT:
2043 result = ~const_poly_int_value (op);
2044 break;
2046 default:
2047 return NULL_RTX;
2049 return immed_wide_int_const (result, mode);
2052 return NULL_RTX;
2055 /* Subroutine of simplify_binary_operation to simplify a binary operation
2056 CODE that can commute with byte swapping, with result mode MODE and
2057 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2058 Return zero if no simplification or canonicalization is possible. */
2060 static rtx
2061 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2062 rtx op0, rtx op1)
2064 rtx tem;
2066 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2067 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2069 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2070 simplify_gen_unary (BSWAP, mode, op1, mode));
2071 return simplify_gen_unary (BSWAP, mode, tem, mode);
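/* For example, with a 32-bit mode,
   (ior (bswap:SI x) (const_int 0xff)) can be rewritten as
   (bswap:SI (ior:SI x (const_int 0xff000000))); the constant is
   byte-swapped and the operation is moved inside the BSWAP.  */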
2074 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2075 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2077 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2078 return simplify_gen_unary (BSWAP, mode, tem, mode);
2081 return NULL_RTX;
2084 /* Subroutine of simplify_binary_operation to simplify a commutative,
2085 associative binary operation CODE with result mode MODE, operating
2086 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2087 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2088 canonicalization is possible. */
2090 static rtx
2091 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2092 rtx op0, rtx op1)
2094 rtx tem;
2096 /* Linearize the operator to the left. */
2097 if (GET_CODE (op1) == code)
2099 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2100 if (GET_CODE (op0) == code)
2102 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2103 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2106 /* "a op (b op c)" becomes "(b op c) op a". */
2107 if (! swap_commutative_operands_p (op1, op0))
2108 return simplify_gen_binary (code, mode, op1, op0);
2110 std::swap (op0, op1);
2113 if (GET_CODE (op0) == code)
2115 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2116 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2118 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2119 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2122 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2123 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2124 if (tem != 0)
2125 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2127 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2128 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2129 if (tem != 0)
2130 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2133 return 0;
2137 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2138 and OP1. Return 0 if no simplification is possible.
2140 Don't use this for relational operations such as EQ or LT.
2141 Use simplify_relational_operation instead. */
2143 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2144 rtx op0, rtx op1)
2146 rtx trueop0, trueop1;
2147 rtx tem;
2149 /* Relational operations don't work here. We must know the mode
2150 of the operands in order to do the comparison correctly.
2151 Assuming a full word can give incorrect results.
2152 Consider comparing 128 with -128 in QImode. */
2153 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2154 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2156 /* Make sure the constant is second. */
2157 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2158 && swap_commutative_operands_p (op0, op1))
2159 std::swap (op0, op1);
2161 trueop0 = avoid_constant_pool_reference (op0);
2162 trueop1 = avoid_constant_pool_reference (op1);
2164 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2165 if (tem)
2166 return tem;
2167 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2169 if (tem)
2170 return tem;
2172 /* If the above steps did not result in a simplification and op0 or op1
2173 were constant pool references, use the referenced constants directly. */
2174 if (trueop0 != op0 || trueop1 != op1)
2175 return simplify_gen_binary (code, mode, trueop0, trueop1);
2177 return NULL_RTX;
2180 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2181 which OP0 and OP1 are both vector series or vector duplicates
2182 (which are really just series with a step of 0). If so, try to
2183 form a new series by applying CODE to the bases and to the steps.
2184 Return null if no simplification is possible.
2186 MODE is the mode of the operation and is known to be a vector
2187 integer mode. */
2189 static rtx
2190 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2191 rtx op0, rtx op1)
2193 rtx base0, step0;
2194 if (vec_duplicate_p (op0, &base0))
2195 step0 = const0_rtx;
2196 else if (!vec_series_p (op0, &base0, &step0))
2197 return NULL_RTX;
2199 rtx base1, step1;
2200 if (vec_duplicate_p (op1, &base1))
2201 step1 = const0_rtx;
2202 else if (!vec_series_p (op1, &base1, &step1))
2203 return NULL_RTX;
2205 /* Only create a new series if we can simplify both parts. In other
2206 cases this isn't really a simplification, and it's not necessarily
2207 a win to replace a vector operation with a scalar operation. */
2208 scalar_mode inner_mode = GET_MODE_INNER (mode);
2209 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2210 if (!new_base)
2211 return NULL_RTX;
2213 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2214 if (!new_step)
2215 return NULL_RTX;
2217 return gen_vec_series (mode, new_base, new_step);
2220 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2221 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2222 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2223 actual constants. */
2225 static rtx
2226 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2227 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2229 rtx tem, reversed, opleft, opright, elt0, elt1;
2230 HOST_WIDE_INT val;
2231 scalar_int_mode int_mode, inner_mode;
2232 poly_int64 offset;
2234 /* Even if we can't compute a constant result,
2235 there are some cases worth simplifying. */
2237 switch (code)
2239 case PLUS:
2240 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2241 when x is NaN, infinite, or finite and nonzero. They aren't
2242 when x is -0 and the rounding mode is not towards -infinity,
2243 since (-0) + 0 is then 0. */
2244 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2245 return op0;
2247 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2248 transformations are safe even for IEEE. */
2249 if (GET_CODE (op0) == NEG)
2250 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2251 else if (GET_CODE (op1) == NEG)
2252 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2254 /* (~a) + 1 -> -a */
2255 if (INTEGRAL_MODE_P (mode)
2256 && GET_CODE (op0) == NOT
2257 && trueop1 == const1_rtx)
2258 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2260 /* Handle both-operands-constant cases. We can only add
2261 CONST_INTs to constants since the sum of relocatable symbols
2262 can't be handled by most assemblers. Don't add CONST_INT
2263 to CONST_INT since overflow won't be computed properly if wider
2264 than HOST_BITS_PER_WIDE_INT. */
2266 if ((GET_CODE (op0) == CONST
2267 || GET_CODE (op0) == SYMBOL_REF
2268 || GET_CODE (op0) == LABEL_REF)
2269 && poly_int_rtx_p (op1, &offset))
2270 return plus_constant (mode, op0, offset);
2271 else if ((GET_CODE (op1) == CONST
2272 || GET_CODE (op1) == SYMBOL_REF
2273 || GET_CODE (op1) == LABEL_REF)
2274 && poly_int_rtx_p (op0, &offset))
2275 return plus_constant (mode, op1, offset);
2277 /* See if this is something like X * C - X or vice versa or
2278 if the multiplication is written as a shift. If so, we can
2279 distribute and make a new multiply, shift, or maybe just
2280 have X (if C is 2 in the example above). But don't make
2281 something more expensive than we had before. */
2283 if (is_a <scalar_int_mode> (mode, &int_mode))
2285 rtx lhs = op0, rhs = op1;
2287 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2288 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2290 if (GET_CODE (lhs) == NEG)
2292 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2293 lhs = XEXP (lhs, 0);
2295 else if (GET_CODE (lhs) == MULT
2296 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2298 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2299 lhs = XEXP (lhs, 0);
2301 else if (GET_CODE (lhs) == ASHIFT
2302 && CONST_INT_P (XEXP (lhs, 1))
2303 && INTVAL (XEXP (lhs, 1)) >= 0
2304 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2306 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2307 GET_MODE_PRECISION (int_mode));
2308 lhs = XEXP (lhs, 0);
2311 if (GET_CODE (rhs) == NEG)
2313 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2314 rhs = XEXP (rhs, 0);
2316 else if (GET_CODE (rhs) == MULT
2317 && CONST_INT_P (XEXP (rhs, 1)))
2319 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2320 rhs = XEXP (rhs, 0);
2322 else if (GET_CODE (rhs) == ASHIFT
2323 && CONST_INT_P (XEXP (rhs, 1))
2324 && INTVAL (XEXP (rhs, 1)) >= 0
2325 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2327 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2328 GET_MODE_PRECISION (int_mode));
2329 rhs = XEXP (rhs, 0);
2332 if (rtx_equal_p (lhs, rhs))
2334 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2335 rtx coeff;
2336 bool speed = optimize_function_for_speed_p (cfun);
2338 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2340 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2341 return (set_src_cost (tem, int_mode, speed)
2342 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
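/* For instance, (plus (mult x 3) x) distributes to (mult x 4), and
   (plus (ashift x 2) x), i.e. x*4 + x, becomes (mult x 5), subject
   to the set_src_cost comparison above.  */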
2346 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2347 if (CONST_SCALAR_INT_P (op1)
2348 && GET_CODE (op0) == XOR
2349 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2350 && mode_signbit_p (mode, op1))
2351 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2352 simplify_gen_binary (XOR, mode, op1,
2353 XEXP (op0, 1)));
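/* For example, in QImode, where the sign bit is 0x80,
   (plus (xor x (const_int 5)) (const_int -128)) folds to
   (xor x (const_int -123)): adding the sign bit is the same as
   XORing with it, and XOR is associative.  */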
2355 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2356 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2357 && GET_CODE (op0) == MULT
2358 && GET_CODE (XEXP (op0, 0)) == NEG)
2360 rtx in1, in2;
2362 in1 = XEXP (XEXP (op0, 0), 0);
2363 in2 = XEXP (op0, 1);
2364 return simplify_gen_binary (MINUS, mode, op1,
2365 simplify_gen_binary (MULT, mode,
2366 in1, in2));
2369 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2370 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2371 is 1. */
2372 if (COMPARISON_P (op0)
2373 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2374 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2375 && (reversed = reversed_comparison (op0, mode)))
2376 return
2377 simplify_gen_unary (NEG, mode, reversed, mode);
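/* For instance, when STORE_FLAG_VALUE is 1,
   (plus (eq a b) (const_int -1)) evaluates to 0 when a == b and to
   -1 otherwise, which is exactly (neg (ne a b)).  */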
2379 /* If one of the operands is a PLUS or a MINUS, see if we can
2380 simplify this by the associative law.
2381 Don't use the associative law for floating point.
2382 The inaccuracy makes it nonassociative,
2383 and subtle programs can break if operations are associated. */
2385 if (INTEGRAL_MODE_P (mode)
2386 && (plus_minus_operand_p (op0)
2387 || plus_minus_operand_p (op1))
2388 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2389 return tem;
2391 /* Reassociate floating point addition only when the user
2392 specifies associative math operations. */
2393 if (FLOAT_MODE_P (mode)
2394 && flag_associative_math)
2396 tem = simplify_associative_operation (code, mode, op0, op1);
2397 if (tem)
2398 return tem;
2401 /* Handle vector series. */
2402 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2404 tem = simplify_binary_operation_series (code, mode, op0, op1);
2405 if (tem)
2406 return tem;
2408 break;
2410 case COMPARE:
2411 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2412 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2413 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2414 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2416 rtx xop00 = XEXP (op0, 0);
2417 rtx xop10 = XEXP (op1, 0);
2419 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2420 return xop00;
2422 if (REG_P (xop00) && REG_P (xop10)
2423 && REGNO (xop00) == REGNO (xop10)
2424 && GET_MODE (xop00) == mode
2425 && GET_MODE (xop10) == mode
2426 && GET_MODE_CLASS (mode) == MODE_CC)
2427 return xop00;
2429 break;
2431 case MINUS:
2432 /* We can't assume x-x is 0 even with non-IEEE floating point,
2433 but since it is zero except in very strange circumstances, we
2434 will treat it as zero with -ffinite-math-only. */
2435 if (rtx_equal_p (trueop0, trueop1)
2436 && ! side_effects_p (op0)
2437 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2438 return CONST0_RTX (mode);
2440 /* Change subtraction from zero into negation. (0 - x) is the
2441 same as -x when x is NaN, infinite, or finite and nonzero.
2442 But if the mode has signed zeros, and does not round towards
2443 -infinity, then 0 - 0 is 0, not -0. */
2444 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2445 return simplify_gen_unary (NEG, mode, op1, mode);
2447 /* (-1 - a) is ~a, unless the expression contains symbolic
2448 constants, in which case not retaining additions and
2449 subtractions could cause invalid assembly to be produced. */
2450 if (trueop0 == constm1_rtx
2451 && !contains_symbolic_reference_p (op1))
2452 return simplify_gen_unary (NOT, mode, op1, mode);
2454 /* Subtracting 0 has no effect unless the mode has signed zeros
2455 and supports rounding towards -infinity. In such a case,
2456 0 - 0 is -0. */
2457 if (!(HONOR_SIGNED_ZEROS (mode)
2458 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2459 && trueop1 == CONST0_RTX (mode))
2460 return op0;
2462 /* See if this is something like X * C - X or vice versa or
2463 if the multiplication is written as a shift. If so, we can
2464 distribute and make a new multiply, shift, or maybe just
2465 have X (if C is 2 in the example above). But don't make
2466 something more expensive than we had before. */
2468 if (is_a <scalar_int_mode> (mode, &int_mode))
2470 rtx lhs = op0, rhs = op1;
2472 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2473 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2475 if (GET_CODE (lhs) == NEG)
2477 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2478 lhs = XEXP (lhs, 0);
2480 else if (GET_CODE (lhs) == MULT
2481 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2483 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2484 lhs = XEXP (lhs, 0);
2486 else if (GET_CODE (lhs) == ASHIFT
2487 && CONST_INT_P (XEXP (lhs, 1))
2488 && INTVAL (XEXP (lhs, 1)) >= 0
2489 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2491 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2492 GET_MODE_PRECISION (int_mode));
2493 lhs = XEXP (lhs, 0);
2496 if (GET_CODE (rhs) == NEG)
2498 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2499 rhs = XEXP (rhs, 0);
2501 else if (GET_CODE (rhs) == MULT
2502 && CONST_INT_P (XEXP (rhs, 1)))
2504 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2505 rhs = XEXP (rhs, 0);
2507 else if (GET_CODE (rhs) == ASHIFT
2508 && CONST_INT_P (XEXP (rhs, 1))
2509 && INTVAL (XEXP (rhs, 1)) >= 0
2510 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2512 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2513 GET_MODE_PRECISION (int_mode));
2514 negcoeff1 = -negcoeff1;
2515 rhs = XEXP (rhs, 0);
2518 if (rtx_equal_p (lhs, rhs))
2520 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2521 rtx coeff;
2522 bool speed = optimize_function_for_speed_p (cfun);
2524 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2526 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2527 return (set_src_cost (tem, int_mode, speed)
2528 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2532 /* (a - (-b)) -> (a + b). True even for IEEE. */
2533 if (GET_CODE (op1) == NEG)
2534 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2536 /* (-x - c) may be simplified as (-c - x). */
2537 if (GET_CODE (op0) == NEG
2538 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2540 tem = simplify_unary_operation (NEG, mode, op1, mode);
2541 if (tem)
2542 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2545 if ((GET_CODE (op0) == CONST
2546 || GET_CODE (op0) == SYMBOL_REF
2547 || GET_CODE (op0) == LABEL_REF)
2548 && poly_int_rtx_p (op1, &offset))
2549 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2551 /* Don't let a relocatable value get a negative coeff. */
2552 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2553 return simplify_gen_binary (PLUS, mode,
2554 op0,
2555 neg_const_int (mode, op1));
2557 /* (x - (x & y)) -> (x & ~y) */
2558 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2560 if (rtx_equal_p (op0, XEXP (op1, 0)))
2562 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2563 GET_MODE (XEXP (op1, 1)));
2564 return simplify_gen_binary (AND, mode, op0, tem);
2566 if (rtx_equal_p (op0, XEXP (op1, 1)))
2568 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2569 GET_MODE (XEXP (op1, 0)));
2570 return simplify_gen_binary (AND, mode, op0, tem);
2574 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2575 by reversing the comparison code if valid. */
2576 if (STORE_FLAG_VALUE == 1
2577 && trueop0 == const1_rtx
2578 && COMPARISON_P (op1)
2579 && (reversed = reversed_comparison (op1, mode)))
2580 return reversed;
2582 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2583 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2584 && GET_CODE (op1) == MULT
2585 && GET_CODE (XEXP (op1, 0)) == NEG)
2587 rtx in1, in2;
2589 in1 = XEXP (XEXP (op1, 0), 0);
2590 in2 = XEXP (op1, 1);
2591 return simplify_gen_binary (PLUS, mode,
2592 simplify_gen_binary (MULT, mode,
2593 in1, in2),
2594 op0);
2597 /* Canonicalize (minus (neg A) (mult B C)) to
2598 (minus (mult (neg B) C) A). */
2599 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2600 && GET_CODE (op1) == MULT
2601 && GET_CODE (op0) == NEG)
2603 rtx in1, in2;
2605 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2606 in2 = XEXP (op1, 1);
2607 return simplify_gen_binary (MINUS, mode,
2608 simplify_gen_binary (MULT, mode,
2609 in1, in2),
2610 XEXP (op0, 0));
2613 /* If one of the operands is a PLUS or a MINUS, see if we can
2614 simplify this by the associative law. This will, for example,
2615 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2616 Don't use the associative law for floating point.
2617 The inaccuracy makes it nonassociative,
2618 and subtle programs can break if operations are associated. */
2620 if (INTEGRAL_MODE_P (mode)
2621 && (plus_minus_operand_p (op0)
2622 || plus_minus_operand_p (op1))
2623 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2624 return tem;
2626 /* Handle vector series. */
2627 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2629 tem = simplify_binary_operation_series (code, mode, op0, op1);
2630 if (tem)
2631 return tem;
2633 break;
2635 case MULT:
2636 if (trueop1 == constm1_rtx)
2637 return simplify_gen_unary (NEG, mode, op0, mode);
2639 if (GET_CODE (op0) == NEG)
2641 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2642 /* If op1 is a MULT as well and simplify_unary_operation
2643 just moved the NEG to the second operand, simplify_gen_binary
2644 below could, through simplify_associative_operation, move
2645 the NEG around again and recurse endlessly. */
2646 if (temp
2647 && GET_CODE (op1) == MULT
2648 && GET_CODE (temp) == MULT
2649 && XEXP (op1, 0) == XEXP (temp, 0)
2650 && GET_CODE (XEXP (temp, 1)) == NEG
2651 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2652 temp = NULL_RTX;
2653 if (temp)
2654 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2656 if (GET_CODE (op1) == NEG)
2658 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2659 /* If op0 is a MULT as well and simplify_unary_operation
2660 just moved the NEG to the second operand, simplify_gen_binary
2661 below could, through simplify_associative_operation, move
2662 the NEG around again and recurse endlessly. */
2663 if (temp
2664 && GET_CODE (op0) == MULT
2665 && GET_CODE (temp) == MULT
2666 && XEXP (op0, 0) == XEXP (temp, 0)
2667 && GET_CODE (XEXP (temp, 1)) == NEG
2668 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2669 temp = NULL_RTX;
2670 if (temp)
2671 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2674 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2675 x is NaN, since x * 0 is then also NaN. Nor is it valid
2676 when the mode has signed zeros, since multiplying a negative
2677 number by 0 will give -0, not 0. */
2678 if (!HONOR_NANS (mode)
2679 && !HONOR_SIGNED_ZEROS (mode)
2680 && trueop1 == CONST0_RTX (mode)
2681 && ! side_effects_p (op0))
2682 return op1;
2684 /* In IEEE floating point, x*1 is not equivalent to x for
2685 signalling NaNs. */
2686 if (!HONOR_SNANS (mode)
2687 && trueop1 == CONST1_RTX (mode))
2688 return op0;
2690 /* Convert multiply by constant power of two into shift. */
2691 if (CONST_SCALAR_INT_P (trueop1))
2693 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2694 if (val >= 0)
2695 return simplify_gen_binary (ASHIFT, mode, op0,
2696 gen_int_shift_amount (mode, val));
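/* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */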
2699 /* x*2 is x+x and x*(-1) is -x */
2700 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2701 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2702 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2703 && GET_MODE (op0) == mode)
2705 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2707 if (real_equal (d1, &dconst2))
2708 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2710 if (!HONOR_SNANS (mode)
2711 && real_equal (d1, &dconstm1))
2712 return simplify_gen_unary (NEG, mode, op0, mode);
2715 /* Optimize -x * -x as x * x. */
2716 if (FLOAT_MODE_P (mode)
2717 && GET_CODE (op0) == NEG
2718 && GET_CODE (op1) == NEG
2719 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2720 && !side_effects_p (XEXP (op0, 0)))
2721 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2723 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2724 if (SCALAR_FLOAT_MODE_P (mode)
2725 && GET_CODE (op0) == ABS
2726 && GET_CODE (op1) == ABS
2727 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2728 && !side_effects_p (XEXP (op0, 0)))
2729 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2731 /* Reassociate multiplication, but for floating point MULTs
2732 only when the user specifies unsafe math optimizations. */
2733 if (! FLOAT_MODE_P (mode)
2734 || flag_unsafe_math_optimizations)
2736 tem = simplify_associative_operation (code, mode, op0, op1);
2737 if (tem)
2738 return tem;
2740 break;
2742 case IOR:
2743 if (trueop1 == CONST0_RTX (mode))
2744 return op0;
2745 if (INTEGRAL_MODE_P (mode)
2746 && trueop1 == CONSTM1_RTX (mode)
2747 && !side_effects_p (op0))
2748 return op1;
2749 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2750 return op0;
2751 /* A | (~A) -> -1 */
2752 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2753 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2754 && ! side_effects_p (op0)
2755 && SCALAR_INT_MODE_P (mode))
2756 return constm1_rtx;
2758 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2759 if (CONST_INT_P (op1)
2760 && HWI_COMPUTABLE_MODE_P (mode)
2761 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2762 && !side_effects_p (op0))
2763 return op1;
2765 /* Canonicalize (X & C1) | C2. */
2766 if (GET_CODE (op0) == AND
2767 && CONST_INT_P (trueop1)
2768 && CONST_INT_P (XEXP (op0, 1)))
2770 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2771 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2772 HOST_WIDE_INT c2 = INTVAL (trueop1);
2774 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2775 if ((c1 & c2) == c1
2776 && !side_effects_p (XEXP (op0, 0)))
2777 return trueop1;
2779 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2780 if (((c1|c2) & mask) == mask)
2781 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
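/* For example, with C1 == 0xf0 and C2 == 0xfc, (X & C1) | C2 is just
   C2, because every bit of C1 is already set in C2; with C1 == 0xff00
   and C2 == 0x00ff in HImode, C1 | C2 covers the whole mode mask, so
   the result simplifies to X | C2.  */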
2784 /* Convert (A & B) | A to A. */
2785 if (GET_CODE (op0) == AND
2786 && (rtx_equal_p (XEXP (op0, 0), op1)
2787 || rtx_equal_p (XEXP (op0, 1), op1))
2788 && ! side_effects_p (XEXP (op0, 0))
2789 && ! side_effects_p (XEXP (op0, 1)))
2790 return op1;
2792 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2793 mode size to (rotate A CX). */
2795 if (GET_CODE (op1) == ASHIFT
2796 || GET_CODE (op1) == SUBREG)
2798 opleft = op1;
2799 opright = op0;
2801 else
2803 opright = op1;
2804 opleft = op0;
2807 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2808 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2809 && CONST_INT_P (XEXP (opleft, 1))
2810 && CONST_INT_P (XEXP (opright, 1))
2811 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2812 == GET_MODE_UNIT_PRECISION (mode)))
2813 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
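/* For example, in SImode, (ior (ashift x (const_int 8))
   (lshiftrt x (const_int 24))) becomes (rotate x (const_int 8)),
   since the two shift counts sum to the 32-bit unit precision.  */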
2815 /* Same, but for ashift that has been "simplified" to a wider mode
2816 by simplify_shift_const. */
2818 if (GET_CODE (opleft) == SUBREG
2819 && is_a <scalar_int_mode> (mode, &int_mode)
2820 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2821 &inner_mode)
2822 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2823 && GET_CODE (opright) == LSHIFTRT
2824 && GET_CODE (XEXP (opright, 0)) == SUBREG
2825 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2826 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2827 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2828 SUBREG_REG (XEXP (opright, 0)))
2829 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2830 && CONST_INT_P (XEXP (opright, 1))
2831 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2832 + INTVAL (XEXP (opright, 1))
2833 == GET_MODE_PRECISION (int_mode)))
2834 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2835 XEXP (SUBREG_REG (opleft), 1));
2837 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2838 a (sign_extend (plus ...)). If so, check whether OP1 is a CONST_INT
2839 and the PLUS does not affect any of the bits in OP1; in that case we
2840 can do the IOR as a PLUS and we can associate. This is valid if OP1
2841 can be safely shifted left C bits. */
2842 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2843 && GET_CODE (XEXP (op0, 0)) == PLUS
2844 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2845 && CONST_INT_P (XEXP (op0, 1))
2846 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2848 int count = INTVAL (XEXP (op0, 1));
2849 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2851 if (mask >> count == INTVAL (trueop1)
2852 && trunc_int_for_mode (mask, mode) == mask
2853 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2854 return simplify_gen_binary (ASHIFTRT, mode,
2855 plus_constant (mode, XEXP (op0, 0),
2856 mask),
2857 XEXP (op0, 1));
2860 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2861 if (tem)
2862 return tem;
2864 tem = simplify_associative_operation (code, mode, op0, op1);
2865 if (tem)
2866 return tem;
2867 break;
2869 case XOR:
2870 if (trueop1 == CONST0_RTX (mode))
2871 return op0;
2872 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2873 return simplify_gen_unary (NOT, mode, op0, mode);
2874 if (rtx_equal_p (trueop0, trueop1)
2875 && ! side_effects_p (op0)
2876 && GET_MODE_CLASS (mode) != MODE_CC)
2877 return CONST0_RTX (mode);
2879 /* Canonicalize XOR of the most significant bit to PLUS. */
2880 if (CONST_SCALAR_INT_P (op1)
2881 && mode_signbit_p (mode, op1))
2882 return simplify_gen_binary (PLUS, mode, op0, op1);
2883 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2884 if (CONST_SCALAR_INT_P (op1)
2885 && GET_CODE (op0) == PLUS
2886 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2887 && mode_signbit_p (mode, XEXP (op0, 1)))
2888 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2889 simplify_gen_binary (XOR, mode, op1,
2890 XEXP (op0, 1)));
2892 /* If we are XORing two things that have no bits in common,
2893 convert them into an IOR. This helps to detect rotation encoded
2894 using those methods and possibly other simplifications. */
2896 if (HWI_COMPUTABLE_MODE_P (mode)
2897 && (nonzero_bits (op0, mode)
2898 & nonzero_bits (op1, mode)) == 0)
2899 return (simplify_gen_binary (IOR, mode, op0, op1));
2901 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2902 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2903 (NOT y). */
2905 int num_negated = 0;
2907 if (GET_CODE (op0) == NOT)
2908 num_negated++, op0 = XEXP (op0, 0);
2909 if (GET_CODE (op1) == NOT)
2910 num_negated++, op1 = XEXP (op1, 0);
2912 if (num_negated == 2)
2913 return simplify_gen_binary (XOR, mode, op0, op1);
2914 else if (num_negated == 1)
2915 return simplify_gen_unary (NOT, mode,
2916 simplify_gen_binary (XOR, mode, op0, op1),
2917 mode);
2920 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2921 correspond to a machine insn or result in further simplifications
2922 if B is a constant. */
2924 if (GET_CODE (op0) == AND
2925 && rtx_equal_p (XEXP (op0, 1), op1)
2926 && ! side_effects_p (op1))
2927 return simplify_gen_binary (AND, mode,
2928 simplify_gen_unary (NOT, mode,
2929 XEXP (op0, 0), mode),
2930 op1);
2932 else if (GET_CODE (op0) == AND
2933 && rtx_equal_p (XEXP (op0, 0), op1)
2934 && ! side_effects_p (op1))
2935 return simplify_gen_binary (AND, mode,
2936 simplify_gen_unary (NOT, mode,
2937 XEXP (op0, 1), mode),
2938 op1);
2940 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2941 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2942 out bits inverted twice and not set by C. Similarly, given
2943 (xor (and (xor A B) C) D), simplify without inverting C in
2944 the xor operand: (xor (and A C) (B&C)^D). */
2946 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2947 && GET_CODE (XEXP (op0, 0)) == XOR
2948 && CONST_INT_P (op1)
2949 && CONST_INT_P (XEXP (op0, 1))
2950 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2952 enum rtx_code op = GET_CODE (op0);
2953 rtx a = XEXP (XEXP (op0, 0), 0);
2954 rtx b = XEXP (XEXP (op0, 0), 1);
2955 rtx c = XEXP (op0, 1);
2956 rtx d = op1;
2957 HOST_WIDE_INT bval = INTVAL (b);
2958 HOST_WIDE_INT cval = INTVAL (c);
2959 HOST_WIDE_INT dval = INTVAL (d);
2960 HOST_WIDE_INT xcval;
2962 if (op == IOR)
2963 xcval = ~cval;
2964 else
2965 xcval = cval;
2967 return simplify_gen_binary (XOR, mode,
2968 simplify_gen_binary (op, mode, a, c),
2969 gen_int_mode ((bval & xcval) ^ dval,
2970 mode));
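/* A worked instance of the IOR form: with B == 0x0f, C == 0x3c and
   D == 0xf0, (xor (ior (xor A 0x0f) 0x3c) 0xf0) becomes
   (xor (ior A 0x3c) 0xf3), since (B & ~C) ^ D == 0x03 ^ 0xf0 == 0xf3.  */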
2973 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2974 we can transform like this:
2975 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2976 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2977 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2978 Attempt a few simplifications when B and C are both constants. */
2979 if (GET_CODE (op0) == AND
2980 && CONST_INT_P (op1)
2981 && CONST_INT_P (XEXP (op0, 1)))
2983 rtx a = XEXP (op0, 0);
2984 rtx b = XEXP (op0, 1);
2985 rtx c = op1;
2986 HOST_WIDE_INT bval = INTVAL (b);
2987 HOST_WIDE_INT cval = INTVAL (c);
2989 /* Instead of computing ~A&C, we compute its negated value,
2990 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2991 optimize for sure. If it does not simplify, we still try
2992 to compute ~A&C below, but since that always allocates
2993 RTL, we don't try that before committing to returning a
2994 simplified expression. */
2995 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2996 GEN_INT (~cval));
2998 if ((~cval & bval) == 0)
3000 rtx na_c = NULL_RTX;
3001 if (n_na_c)
3002 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3003 else
3005 /* If ~A does not simplify, don't bother: we don't
3006 want to simplify 2 operations into 3, and if na_c
3007 were to simplify with na, n_na_c would have
3008 simplified as well. */
3009 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3010 if (na)
3011 na_c = simplify_gen_binary (AND, mode, na, c);
3014 /* Try to simplify ~A&C | ~B&C. */
3015 if (na_c != NULL_RTX)
3016 return simplify_gen_binary (IOR, mode, na_c,
3017 gen_int_mode (~bval & cval, mode));
3019 else
3021 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3022 if (n_na_c == CONSTM1_RTX (mode))
3024 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3025 gen_int_mode (~cval & bval,
3026 mode));
3027 return simplify_gen_binary (IOR, mode, a_nc_b,
3028 gen_int_mode (~bval & cval,
3029 mode));
3034 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
3035 do (ior (and A ~C) (and B C)), which maps to a machine instruction on
3036 some machines and also has a shorter instruction path length. */
3037 if (GET_CODE (op0) == AND
3038 && GET_CODE (XEXP (op0, 0)) == XOR
3039 && CONST_INT_P (XEXP (op0, 1))
3040 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3042 rtx a = trueop1;
3043 rtx b = XEXP (XEXP (op0, 0), 1);
3044 rtx c = XEXP (op0, 1);
3045 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3046 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3047 rtx bc = simplify_gen_binary (AND, mode, b, c);
3048 return simplify_gen_binary (IOR, mode, a_nc, bc);
3050 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3051 else if (GET_CODE (op0) == AND
3052 && GET_CODE (XEXP (op0, 0)) == XOR
3053 && CONST_INT_P (XEXP (op0, 1))
3054 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3056 rtx a = XEXP (XEXP (op0, 0), 0);
3057 rtx b = trueop1;
3058 rtx c = XEXP (op0, 1);
3059 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3060 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3061 rtx ac = simplify_gen_binary (AND, mode, a, c);
3062 return simplify_gen_binary (IOR, mode, ac, b_nc);
3065 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3066 comparison if STORE_FLAG_VALUE is 1. */
3067 if (STORE_FLAG_VALUE == 1
3068 && trueop1 == const1_rtx
3069 && COMPARISON_P (op0)
3070 && (reversed = reversed_comparison (op0, mode)))
3071 return reversed;
3073 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3074 is (lt foo (const_int 0)), so we can perform the above
3075 simplification if STORE_FLAG_VALUE is 1. */
3077 if (is_a <scalar_int_mode> (mode, &int_mode)
3078 && STORE_FLAG_VALUE == 1
3079 && trueop1 == const1_rtx
3080 && GET_CODE (op0) == LSHIFTRT
3081 && CONST_INT_P (XEXP (op0, 1))
3082 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3083 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3085 /* (xor (comparison foo bar) (const_int sign-bit))
3086 when STORE_FLAG_VALUE is the sign bit. */
3087 if (is_a <scalar_int_mode> (mode, &int_mode)
3088 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3089 && trueop1 == const_true_rtx
3090 && COMPARISON_P (op0)
3091 && (reversed = reversed_comparison (op0, int_mode)))
3092 return reversed;
3094 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3095 if (tem)
3096 return tem;
3098 tem = simplify_associative_operation (code, mode, op0, op1);
3099 if (tem)
3100 return tem;
3101 break;
3103 case AND:
3104 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3105 return trueop1;
3106 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3107 return op0;
3108 if (HWI_COMPUTABLE_MODE_P (mode))
3110 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3111 HOST_WIDE_INT nzop1;
3112 if (CONST_INT_P (trueop1))
3114 HOST_WIDE_INT val1 = INTVAL (trueop1);
3115 /* If we are turning off bits already known off in OP0, we need
3116 not do an AND. */
3117 if ((nzop0 & ~val1) == 0)
3118 return op0;
3120 nzop1 = nonzero_bits (trueop1, mode);
3121 /* If we are clearing all the nonzero bits, the result is zero. */
3122 if ((nzop1 & nzop0) == 0
3123 && !side_effects_p (op0) && !side_effects_p (op1))
3124 return CONST0_RTX (mode);
3126 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3127 && GET_MODE_CLASS (mode) != MODE_CC)
3128 return op0;
3129 /* A & (~A) -> 0 */
3130 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3131 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3132 && ! side_effects_p (op0)
3133 && GET_MODE_CLASS (mode) != MODE_CC)
3134 return CONST0_RTX (mode);
3136 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3137 there are no nonzero bits of C outside of X's mode. */
3138 if ((GET_CODE (op0) == SIGN_EXTEND
3139 || GET_CODE (op0) == ZERO_EXTEND)
3140 && CONST_INT_P (trueop1)
3141 && HWI_COMPUTABLE_MODE_P (mode)
3142 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3143 & UINTVAL (trueop1)) == 0)
3145 machine_mode imode = GET_MODE (XEXP (op0, 0));
3146 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3147 gen_int_mode (INTVAL (trueop1),
3148 imode));
3149 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3152 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3153 we might be able to further simplify the AND with X and potentially
3154 remove the truncation altogether. */
3155 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3157 rtx x = XEXP (op0, 0);
3158 machine_mode xmode = GET_MODE (x);
3159 tem = simplify_gen_binary (AND, xmode, x,
3160 gen_int_mode (INTVAL (trueop1), xmode));
3161 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3164 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3165 if (GET_CODE (op0) == IOR
3166 && CONST_INT_P (trueop1)
3167 && CONST_INT_P (XEXP (op0, 1)))
3169 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3170 return simplify_gen_binary (IOR, mode,
3171 simplify_gen_binary (AND, mode,
3172 XEXP (op0, 0), op1),
3173 gen_int_mode (tmp, mode));
3176 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3177 insn (and may simplify more). */
3178 if (GET_CODE (op0) == XOR
3179 && rtx_equal_p (XEXP (op0, 0), op1)
3180 && ! side_effects_p (op1))
3181 return simplify_gen_binary (AND, mode,
3182 simplify_gen_unary (NOT, mode,
3183 XEXP (op0, 1), mode),
3184 op1);
3186 if (GET_CODE (op0) == XOR
3187 && rtx_equal_p (XEXP (op0, 1), op1)
3188 && ! side_effects_p (op1))
3189 return simplify_gen_binary (AND, mode,
3190 simplify_gen_unary (NOT, mode,
3191 XEXP (op0, 0), mode),
3192 op1);
3194 /* Similarly for (~(A ^ B)) & A. */
3195 if (GET_CODE (op0) == NOT
3196 && GET_CODE (XEXP (op0, 0)) == XOR
3197 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3198 && ! side_effects_p (op1))
3199 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3201 if (GET_CODE (op0) == NOT
3202 && GET_CODE (XEXP (op0, 0)) == XOR
3203 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3204 && ! side_effects_p (op1))
3205 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3207 /* Convert (A | B) & A to A. */
3208 if (GET_CODE (op0) == IOR
3209 && (rtx_equal_p (XEXP (op0, 0), op1)
3210 || rtx_equal_p (XEXP (op0, 1), op1))
3211 && ! side_effects_p (XEXP (op0, 0))
3212 && ! side_effects_p (XEXP (op0, 1)))
3213 return op1;
3215 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3216 ((A & N) + B) & M -> (A + B) & M
3217 Similarly if (N & M) == 0,
3218 ((A | N) + B) & M -> (A + B) & M
3219 and for - instead of + and/or ^ instead of |.
3220 Also, if (N & M) == 0, then
3221 (A +- N) & M -> A & M. */
3222 if (CONST_INT_P (trueop1)
3223 && HWI_COMPUTABLE_MODE_P (mode)
3224 && ~UINTVAL (trueop1)
3225 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3226 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3228 rtx pmop[2];
3229 int which;
3231 pmop[0] = XEXP (op0, 0);
3232 pmop[1] = XEXP (op0, 1);
3234 if (CONST_INT_P (pmop[1])
3235 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3236 return simplify_gen_binary (AND, mode, pmop[0], op1);
3238 for (which = 0; which < 2; which++)
3240 tem = pmop[which];
3241 switch (GET_CODE (tem))
3243 case AND:
3244 if (CONST_INT_P (XEXP (tem, 1))
3245 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3246 == UINTVAL (trueop1))
3247 pmop[which] = XEXP (tem, 0);
3248 break;
3249 case IOR:
3250 case XOR:
3251 if (CONST_INT_P (XEXP (tem, 1))
3252 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3253 pmop[which] = XEXP (tem, 0);
3254 break;
3255 default:
3256 break;
3260 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3262 tem = simplify_gen_binary (GET_CODE (op0), mode,
3263 pmop[0], pmop[1]);
3264 return simplify_gen_binary (code, mode, tem, op1);
3269 /* (and X (ior (not X) Y)) -> (and X Y) */
3269 if (GET_CODE (op1) == IOR
3270 && GET_CODE (XEXP (op1, 0)) == NOT
3271 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3272 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3274 /* (and (ior (not X) Y) X) -> (and X Y) */
3275 if (GET_CODE (op0) == IOR
3276 && GET_CODE (XEXP (op0, 0)) == NOT
3277 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3278 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3281 /* (and X (ior Y (not X))) -> (and X Y) */
3281 if (GET_CODE (op1) == IOR
3282 && GET_CODE (XEXP (op1, 1)) == NOT
3283 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3284 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3286 /* (and (ior Y (not X)) X) -> (and X Y) */
3287 if (GET_CODE (op0) == IOR
3288 && GET_CODE (XEXP (op0, 1)) == NOT
3289 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3290 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3292 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3293 if (tem)
3294 return tem;
3296 tem = simplify_associative_operation (code, mode, op0, op1);
3297 if (tem)
3298 return tem;
3299 break;
3301 case UDIV:
3302 /* 0/x is 0 (or x&0 if x has side-effects). */
3303 if (trueop0 == CONST0_RTX (mode)
3304 && !cfun->can_throw_non_call_exceptions)
3306 if (side_effects_p (op1))
3307 return simplify_gen_binary (AND, mode, op1, trueop0);
3308 return trueop0;
3310 /* x/1 is x. */
3311 if (trueop1 == CONST1_RTX (mode))
3313 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3314 if (tem)
3315 return tem;
3317 /* Convert divide by power of two into shift. */
3318 if (CONST_INT_P (trueop1)
3319 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3320 return simplify_gen_binary (LSHIFTRT, mode, op0,
3321 gen_int_shift_amount (mode, val));
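/* E.g. (udiv x (const_int 16)) becomes (lshiftrt x (const_int 4)).  */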
3322 break;
3324 case DIV:
3325 /* Handle floating point and integers separately. */
3326 if (SCALAR_FLOAT_MODE_P (mode))
3328 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3329 safe for modes with NaNs, since 0.0 / 0.0 will then be
3330 NaN rather than 0.0. Nor is it safe for modes with signed
3331 zeros, since dividing 0 by a negative number gives -0.0 */
3332 if (trueop0 == CONST0_RTX (mode)
3333 && !HONOR_NANS (mode)
3334 && !HONOR_SIGNED_ZEROS (mode)
3335 && ! side_effects_p (op1))
3336 return op0;
3337 /* x/1.0 is x. */
3338 if (trueop1 == CONST1_RTX (mode)
3339 && !HONOR_SNANS (mode))
3340 return op0;
3342 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3343 && trueop1 != CONST0_RTX (mode))
3345 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3347 /* x/-1.0 is -x. */
3348 if (real_equal (d1, &dconstm1)
3349 && !HONOR_SNANS (mode))
3350 return simplify_gen_unary (NEG, mode, op0, mode);
3352 /* Change FP division by a constant into multiplication.
3353 Only do this with -freciprocal-math. */
3354 if (flag_reciprocal_math
3355 && !real_equal (d1, &dconst0))
3357 REAL_VALUE_TYPE d;
3358 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3359 tem = const_double_from_real_value (d, mode);
3360 return simplify_gen_binary (MULT, mode, op0, tem);
3364 else if (SCALAR_INT_MODE_P (mode))
3366 /* 0/x is 0 (or x&0 if x has side-effects). */
3367 if (trueop0 == CONST0_RTX (mode)
3368 && !cfun->can_throw_non_call_exceptions)
3370 if (side_effects_p (op1))
3371 return simplify_gen_binary (AND, mode, op1, trueop0);
3372 return trueop0;
3374 /* x/1 is x. */
3375 if (trueop1 == CONST1_RTX (mode))
3377 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3378 if (tem)
3379 return tem;
3381 /* x/-1 is -x. */
3382 if (trueop1 == constm1_rtx)
3384 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3385 if (x)
3386 return simplify_gen_unary (NEG, mode, x, mode);
3389 break;
3391 case UMOD:
3392 /* 0%x is 0 (or x&0 if x has side-effects). */
3393 if (trueop0 == CONST0_RTX (mode))
3395 if (side_effects_p (op1))
3396 return simplify_gen_binary (AND, mode, op1, trueop0);
3397 return trueop0;
3399 /* x%1 is 0 (or x&0 if x has side-effects). */
3400 if (trueop1 == CONST1_RTX (mode))
3402 if (side_effects_p (op0))
3403 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3404 return CONST0_RTX (mode);
3406 /* Implement modulus by power of two as AND. */
3407 if (CONST_INT_P (trueop1)
3408 && exact_log2 (UINTVAL (trueop1)) > 0)
3409 return simplify_gen_binary (AND, mode, op0,
3410 gen_int_mode (UINTVAL (trueop1) - 1,
3411 mode));
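/* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)).  */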
3412 break;
3414 case MOD:
3415 /* 0%x is 0 (or x&0 if x has side-effects). */
3416 if (trueop0 == CONST0_RTX (mode))
3418 if (side_effects_p (op1))
3419 return simplify_gen_binary (AND, mode, op1, trueop0);
3420 return trueop0;
3422 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3423 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3425 if (side_effects_p (op0))
3426 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3427 return CONST0_RTX (mode);
3429 break;
3431 case ROTATERT:
3432 case ROTATE:
3433 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3434 prefer left rotation; if op1 is in the range bitsize / 2 + 1 to
3435 bitsize - 1, use the other rotate direction with an amount of
3436 1 .. bitsize / 2 - 1 instead. */
3437 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3438 if (CONST_INT_P (trueop1)
3439 && IN_RANGE (INTVAL (trueop1),
3440 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3441 GET_MODE_UNIT_PRECISION (mode) - 1))
3443 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3444 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3445 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3446 mode, op0, new_amount_rtx);
3448 #endif
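/* For example, in SImode, on targets providing both rotate and
   rotatert patterns, (rotate x (const_int 20)) is canonicalized to
   (rotatert x (const_int 12)), and (rotatert x (const_int 20)) to
   (rotate x (const_int 12)).  */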
3449 /* FALLTHRU */
3450 case ASHIFTRT:
3451 if (trueop1 == CONST0_RTX (mode))
3452 return op0;
3453 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3454 return op0;
3455 /* Rotating ~0 always results in ~0. */
3456 if (CONST_INT_P (trueop0)
3457 && HWI_COMPUTABLE_MODE_P (mode)
3458 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3459 && ! side_effects_p (op1))
3460 return op0;
3462 canonicalize_shift:
3463 /* Given:
3464 scalar modes M1, M2
3465 scalar constants c1, c2
3466 size (M2) > size (M1)
3467 c1 == size (M2) - size (M1)
3468 optimize:
3469 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3470 <low_part>)
3471 (const_int <c2>)) to
3473 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3474 <low_part>). */
3475 if ((code == ASHIFTRT || code == LSHIFTRT)
3476 && is_a <scalar_int_mode> (mode, &int_mode)
3477 && SUBREG_P (op0)
3478 && CONST_INT_P (op1)
3479 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3480 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3481 &inner_mode)
3482 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3483 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3484 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3485 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3486 && subreg_lowpart_p (op0))
3488 rtx tmp = gen_int_shift_amount
3489 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3490 tmp = simplify_gen_binary (code, inner_mode,
3491 XEXP (SUBREG_REG (op0), 0),
3492 tmp);
3493 return lowpart_subreg (int_mode, tmp, inner_mode);
3496 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3498 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3499 if (val != INTVAL (op1))
3500 return simplify_gen_binary (code, mode, op0,
3501 gen_int_shift_amount (mode, val));
3503 break;
3505 case ASHIFT:
3506 case SS_ASHIFT:
3507 case US_ASHIFT:
3508 if (trueop1 == CONST0_RTX (mode))
3509 return op0;
3510 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3511 return op0;
3512 goto canonicalize_shift;
3514 case LSHIFTRT:
3515 if (trueop1 == CONST0_RTX (mode))
3516 return op0;
3517 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3518 return op0;
3519 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3520 if (GET_CODE (op0) == CLZ
3521 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3522 && CONST_INT_P (trueop1)
3523 && STORE_FLAG_VALUE == 1
3524 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3526 unsigned HOST_WIDE_INT zero_val = 0;
3528 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3529 && zero_val == GET_MODE_PRECISION (inner_mode)
3530 && INTVAL (trueop1) == exact_log2 (zero_val))
3531 return simplify_gen_relational (EQ, mode, inner_mode,
3532 XEXP (op0, 0), const0_rtx);
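/* For instance, if CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode and
   STORE_FLAG_VALUE is 1, then (lshiftrt (clz:SI x) (const_int 5)) is
   nonzero only when the CLZ result is 32, i.e. when x is zero, so it
   folds to (eq x 0).  */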
3534 goto canonicalize_shift;
3536 case SMIN:
3537 if (HWI_COMPUTABLE_MODE_P (mode)
3538 && mode_signbit_p (mode, trueop1)
3539 && ! side_effects_p (op0))
3540 return op1;
3541 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3542 return op0;
3543 tem = simplify_associative_operation (code, mode, op0, op1);
3544 if (tem)
3545 return tem;
3546 break;
3548 case SMAX:
3549 if (HWI_COMPUTABLE_MODE_P (mode)
3550 && CONST_INT_P (trueop1)
3551 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3552 && ! side_effects_p (op0))
3553 return op1;
3554 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3555 return op0;
3556 tem = simplify_associative_operation (code, mode, op0, op1);
3557 if (tem)
3558 return tem;
3559 break;
3561 case UMIN:
3562 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3563 return op1;
3564 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3565 return op0;
3566 tem = simplify_associative_operation (code, mode, op0, op1);
3567 if (tem)
3568 return tem;
3569 break;
3571 case UMAX:
3572 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3573 return op1;
3574 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3575 return op0;
3576 tem = simplify_associative_operation (code, mode, op0, op1);
3577 if (tem)
3578 return tem;
3579 break;
3581 case SS_PLUS:
3582 case US_PLUS:
3583 case SS_MINUS:
3584 case US_MINUS:
3585 case SS_MULT:
3586 case US_MULT:
3587 case SS_DIV:
3588 case US_DIV:
3589 /* ??? There are simplifications that can be done. */
3590 return 0;
3592 case VEC_SERIES:
3593 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3594 return gen_vec_duplicate (mode, op0);
3595 if (valid_for_const_vector_p (mode, op0)
3596 && valid_for_const_vector_p (mode, op1))
3597 return gen_const_vec_series (mode, op0, op1);
3598 return 0;
3600 case VEC_SELECT:
3601 if (!VECTOR_MODE_P (mode))
3603 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3604 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3605 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3606 gcc_assert (XVECLEN (trueop1, 0) == 1);
3608 /* We can't reason about selections made at runtime. */
3609 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3610 return 0;
3612 if (vec_duplicate_p (trueop0, &elt0))
3613 return elt0;
3615 if (GET_CODE (trueop0) == CONST_VECTOR)
3616 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3617 (trueop1, 0, 0)));
3619 /* Extract a scalar element from a nested VEC_SELECT expression
3620 (with optional nested VEC_CONCAT expression). Some targets
3621 (i386) extract a scalar element from a vector using a chain of
3622 nested VEC_SELECT expressions. When the input operand is a memory
3623 operand, this operation can be simplified to a simple scalar
3624 load from an offset memory address. */
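/* For example (modes picked only for illustration), element 5 of an
8-float vector in memory may be expressed as
(vec_select:SF
(vec_select:V4SF (mem:V8SF ADDR) (parallel [4 5 6 7]))
(parallel [1]))
which the code below flattens into a single selection of element 5,
and which can eventually become a plain SFmode load from
ADDR + 20 bytes. */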
3625 int n_elts;
3626 if (GET_CODE (trueop0) == VEC_SELECT
3627 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3628 .is_constant (&n_elts)))
3630 rtx op0 = XEXP (trueop0, 0);
3631 rtx op1 = XEXP (trueop0, 1);
3633 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3634 int elem;
3636 rtvec vec;
3637 rtx tmp_op, tmp;
3639 gcc_assert (GET_CODE (op1) == PARALLEL);
3640 gcc_assert (i < n_elts);
3642 /* Select the element pointed to by the nested selector. */
3643 elem = INTVAL (XVECEXP (op1, 0, i));
3645 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3646 if (GET_CODE (op0) == VEC_CONCAT)
3648 rtx op00 = XEXP (op0, 0);
3649 rtx op01 = XEXP (op0, 1);
3651 machine_mode mode00, mode01;
3652 int n_elts00, n_elts01;
3654 mode00 = GET_MODE (op00);
3655 mode01 = GET_MODE (op01);
3657 /* Find out the number of elements of each operand.
3658 Since the concatenated result has a constant number
3659 of elements, the operands must too. */
3660 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3661 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3663 gcc_assert (n_elts == n_elts00 + n_elts01);
3665 /* Select correct operand of VEC_CONCAT
3666 and adjust selector. */
3667 if (elem < n_elts01)
3668 tmp_op = op00;
3669 else
3671 tmp_op = op01;
3672 elem -= n_elts00;
3675 else
3676 tmp_op = op0;
3678 vec = rtvec_alloc (1);
3679 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3681 tmp = gen_rtx_fmt_ee (code, mode,
3682 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3683 return tmp;
3686 else
3688 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3689 gcc_assert (GET_MODE_INNER (mode)
3690 == GET_MODE_INNER (GET_MODE (trueop0)));
3691 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3693 if (vec_duplicate_p (trueop0, &elt0))
3694 /* It doesn't matter which elements are selected by trueop1,
3695 because they are all the same. */
3696 return gen_vec_duplicate (mode, elt0);
3698 if (GET_CODE (trueop0) == CONST_VECTOR)
3700 unsigned n_elts = XVECLEN (trueop1, 0);
3701 rtvec v = rtvec_alloc (n_elts);
3702 unsigned int i;
3704 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3705 for (i = 0; i < n_elts; i++)
3707 rtx x = XVECEXP (trueop1, 0, i);
3709 if (!CONST_INT_P (x))
3710 return 0;
3712 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3713 INTVAL (x));
3716 return gen_rtx_CONST_VECTOR (mode, v);
3719 /* Recognize the identity. */
3720 if (GET_MODE (trueop0) == mode)
3722 bool maybe_ident = true;
3723 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3725 rtx j = XVECEXP (trueop1, 0, i);
3726 if (!CONST_INT_P (j) || INTVAL (j) != i)
3728 maybe_ident = false;
3729 break;
3732 if (maybe_ident)
3733 return trueop0;
3736 /* If we build {a,b} then permute it, build the result directly. */
3737 if (XVECLEN (trueop1, 0) == 2
3738 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3739 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3740 && GET_CODE (trueop0) == VEC_CONCAT
3741 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3742 && GET_MODE (XEXP (trueop0, 0)) == mode
3743 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3744 && GET_MODE (XEXP (trueop0, 1)) == mode)
3746 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3747 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3748 rtx subop0, subop1;
3750 gcc_assert (i0 < 4 && i1 < 4);
3751 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3752 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3754 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3757 if (XVECLEN (trueop1, 0) == 2
3758 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3759 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3760 && GET_CODE (trueop0) == VEC_CONCAT
3761 && GET_MODE (trueop0) == mode)
3763 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3764 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3765 rtx subop0, subop1;
3767 gcc_assert (i0 < 2 && i1 < 2);
3768 subop0 = XEXP (trueop0, i0);
3769 subop1 = XEXP (trueop0, i1);
3771 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3774 /* If we select one half of a vec_concat, return that. */
3775 int l0, l1;
3776 if (GET_CODE (trueop0) == VEC_CONCAT
3777 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3778 .is_constant (&l0))
3779 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
3780 .is_constant (&l1))
3781 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3783 rtx subop0 = XEXP (trueop0, 0);
3784 rtx subop1 = XEXP (trueop0, 1);
3785 machine_mode mode0 = GET_MODE (subop0);
3786 machine_mode mode1 = GET_MODE (subop1);
3787 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3788 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3790 bool success = true;
3791 for (int i = 1; i < l0; ++i)
3793 rtx j = XVECEXP (trueop1, 0, i);
3794 if (!CONST_INT_P (j) || INTVAL (j) != i)
3796 success = false;
3797 break;
3800 if (success)
3801 return subop0;
3803 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3805 bool success = true;
3806 for (int i = 1; i < l1; ++i)
3808 rtx j = XVECEXP (trueop1, 0, i);
3809 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3811 success = false;
3812 break;
3815 if (success)
3816 return subop1;
3821 if (XVECLEN (trueop1, 0) == 1
3822 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3823 && GET_CODE (trueop0) == VEC_CONCAT)
3825 rtx vec = trueop0;
3826 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3828 /* Try to find the element in the VEC_CONCAT. */
3829 while (GET_MODE (vec) != mode
3830 && GET_CODE (vec) == VEC_CONCAT)
3832 poly_int64 vec_size;
3834 if (CONST_INT_P (XEXP (vec, 0)))
3836 /* vec_concat of two const_ints doesn't make sense with
3837 respect to modes. */
3838 if (CONST_INT_P (XEXP (vec, 1)))
3839 return 0;
3841 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3842 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3844 else
3845 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3847 if (known_lt (offset, vec_size))
3848 vec = XEXP (vec, 0);
3849 else if (known_ge (offset, vec_size))
3851 offset -= vec_size;
3852 vec = XEXP (vec, 1);
3854 else
3855 break;
3856 vec = avoid_constant_pool_reference (vec);
3859 if (GET_MODE (vec) == mode)
3860 return vec;
3863 /* If we select elements in a vec_merge that all come from the same
3864 operand, select from that operand directly. */
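/* Illustration (hypothetical 4-element vectors): in
(vec_select (vec_merge A B (const_int 5)) (parallel [0 2]))
the mask 5 = 0b0101 selects lanes 0 and 2 from A, so every selected
element comes from A and the expression is just
(vec_select A (parallel [0 2])), provided B has no side effects. */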
3865 if (GET_CODE (op0) == VEC_MERGE)
3867 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3868 if (CONST_INT_P (trueop02))
3870 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3871 bool all_operand0 = true;
3872 bool all_operand1 = true;
3873 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3875 rtx j = XVECEXP (trueop1, 0, i);
3876 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3877 all_operand1 = false;
3878 else
3879 all_operand0 = false;
3881 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3882 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3883 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3884 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3888 /* If we have two nested selects that are inverses of each
3889 other, replace them with the source operand. */
3890 if (GET_CODE (trueop0) == VEC_SELECT
3891 && GET_MODE (XEXP (trueop0, 0)) == mode)
3893 rtx op0_subop1 = XEXP (trueop0, 1);
3894 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3895 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
3897 /* Apply the outer ordering vector to the inner one. (The inner
3898 ordering vector is expressly permitted to be of a different
3899 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3900 then the two VEC_SELECTs cancel. */
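/* E.g. (purely illustrative) with 4-element vectors, an inner selector
(parallel [2 3 0 1]) followed by an outer selector (parallel [2 3 0 1])
composes to [0 1 2 3]: outer lane i picks inner lane sel_outer[i], which
in turn is source lane sel_inner[sel_outer[i]] == i. The composition is
the identity, so the nested VEC_SELECTs cancel and the source operand is
returned. */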
3901 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3903 rtx x = XVECEXP (trueop1, 0, i);
3904 if (!CONST_INT_P (x))
3905 return 0;
3906 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3907 if (!CONST_INT_P (y) || i != INTVAL (y))
3908 return 0;
3910 return XEXP (trueop0, 0);
3913 return 0;
3914 case VEC_CONCAT:
3916 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3917 ? GET_MODE (trueop0)
3918 : GET_MODE_INNER (mode));
3919 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3920 ? GET_MODE (trueop1)
3921 : GET_MODE_INNER (mode));
3923 gcc_assert (VECTOR_MODE_P (mode));
3924 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
3925 + GET_MODE_SIZE (op1_mode),
3926 GET_MODE_SIZE (mode)));
3928 if (VECTOR_MODE_P (op0_mode))
3929 gcc_assert (GET_MODE_INNER (mode)
3930 == GET_MODE_INNER (op0_mode));
3931 else
3932 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3934 if (VECTOR_MODE_P (op1_mode))
3935 gcc_assert (GET_MODE_INNER (mode)
3936 == GET_MODE_INNER (op1_mode));
3937 else
3938 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3940 unsigned int n_elts, in_n_elts;
3941 if ((GET_CODE (trueop0) == CONST_VECTOR
3942 || CONST_SCALAR_INT_P (trueop0)
3943 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3944 && (GET_CODE (trueop1) == CONST_VECTOR
3945 || CONST_SCALAR_INT_P (trueop1)
3946 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
3947 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
3948 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
3950 rtvec v = rtvec_alloc (n_elts);
3951 unsigned int i;
3952 for (i = 0; i < n_elts; i++)
3954 if (i < in_n_elts)
3956 if (!VECTOR_MODE_P (op0_mode))
3957 RTVEC_ELT (v, i) = trueop0;
3958 else
3959 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3961 else
3963 if (!VECTOR_MODE_P (op1_mode))
3964 RTVEC_ELT (v, i) = trueop1;
3965 else
3966 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3967 i - in_n_elts);
3971 return gen_rtx_CONST_VECTOR (mode, v);
3974 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3975 Restrict the transformation to avoid generating a VEC_SELECT with a
3976 mode unrelated to its operand. */
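/* For instance (hypothetical V4SF source X):
(vec_concat:V4SF (vec_select:V2SF X (parallel [0 1]))
(vec_select:V2SF X (parallel [2 3])))
becomes (vec_select:V4SF X (parallel [0 1 2 3])), which the identity
check for VEC_SELECT above can then reduce to X itself. */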
3977 if (GET_CODE (trueop0) == VEC_SELECT
3978 && GET_CODE (trueop1) == VEC_SELECT
3979 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3980 && GET_MODE (XEXP (trueop0, 0)) == mode)
3982 rtx par0 = XEXP (trueop0, 1);
3983 rtx par1 = XEXP (trueop1, 1);
3984 int len0 = XVECLEN (par0, 0);
3985 int len1 = XVECLEN (par1, 0);
3986 rtvec vec = rtvec_alloc (len0 + len1);
3987 for (int i = 0; i < len0; i++)
3988 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3989 for (int i = 0; i < len1; i++)
3990 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3991 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3992 gen_rtx_PARALLEL (VOIDmode, vec));
3995 return 0;
3997 default:
3998 gcc_unreachable ();
4001 if (mode == GET_MODE (op0)
4002 && mode == GET_MODE (op1)
4003 && vec_duplicate_p (op0, &elt0)
4004 && vec_duplicate_p (op1, &elt1))
4006 /* Try applying the operator to ELT and see if that simplifies.
4007 We can duplicate the result if so.
4009 The reason we don't use simplify_gen_binary is that it isn't
4010 necessarily a win to convert things like:
4012 (plus:V (vec_duplicate:V (reg:S R1))
4013 (vec_duplicate:V (reg:S R2)))
4015 to:
4017 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4019 The first might be done entirely in vector registers while the
4020 second might need a move between register files. */
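/* Schematic illustration (V4SI chosen arbitrarily):
(ior:V4SI (vec_duplicate:V4SI (reg:SI R))
(vec_duplicate:V4SI (not:SI (reg:SI R))))
The scalar (ior:SI (reg:SI R) (not:SI (reg:SI R))) folds to
(const_int -1), so the whole expression becomes a V4SI constant with
every element -1. If the scalar operation does not fold, the vector
expression is left alone for the reason given above. */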
4021 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4022 elt0, elt1);
4023 if (tem)
4024 return gen_vec_duplicate (mode, tem);
4027 return 0;
4031 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4032 rtx op0, rtx op1)
4034 if (VECTOR_MODE_P (mode)
4035 && code != VEC_CONCAT
4036 && GET_CODE (op0) == CONST_VECTOR
4037 && GET_CODE (op1) == CONST_VECTOR)
4039 unsigned int n_elts;
4040 if (!CONST_VECTOR_NUNITS (op0).is_constant (&n_elts))
4041 return NULL_RTX;
4043 gcc_assert (known_eq (n_elts, CONST_VECTOR_NUNITS (op1)));
4044 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4045 rtvec v = rtvec_alloc (n_elts);
4046 unsigned int i;
4048 for (i = 0; i < n_elts; i++)
4050 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4051 CONST_VECTOR_ELT (op0, i),
4052 CONST_VECTOR_ELT (op1, i));
4053 if (!x || !valid_for_const_vector_p (mode, x))
4054 return 0;
4055 RTVEC_ELT (v, i) = x;
4058 return gen_rtx_CONST_VECTOR (mode, v);
4061 if (VECTOR_MODE_P (mode)
4062 && code == VEC_CONCAT
4063 && (CONST_SCALAR_INT_P (op0)
4064 || CONST_FIXED_P (op0)
4065 || CONST_DOUBLE_AS_FLOAT_P (op0))
4066 && (CONST_SCALAR_INT_P (op1)
4067 || CONST_DOUBLE_AS_FLOAT_P (op1)
4068 || CONST_FIXED_P (op1)))
4070 /* Both inputs have a constant number of elements, so the result
4071 must too. */
4072 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4073 rtvec v = rtvec_alloc (n_elts);
4075 gcc_assert (n_elts >= 2);
4076 if (n_elts == 2)
4078 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4079 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4081 RTVEC_ELT (v, 0) = op0;
4082 RTVEC_ELT (v, 1) = op1;
4084 else
4086 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4087 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4088 unsigned i;
4090 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4091 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4092 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4094 for (i = 0; i < op0_n_elts; ++i)
4095 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4096 for (i = 0; i < op1_n_elts; ++i)
4097 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4100 return gen_rtx_CONST_VECTOR (mode, v);
4103 if (SCALAR_FLOAT_MODE_P (mode)
4104 && CONST_DOUBLE_AS_FLOAT_P (op0)
4105 && CONST_DOUBLE_AS_FLOAT_P (op1)
4106 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4108 if (code == AND
4109 || code == IOR
4110 || code == XOR)
4112 long tmp0[4];
4113 long tmp1[4];
4114 REAL_VALUE_TYPE r;
4115 int i;
4117 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4118 GET_MODE (op0));
4119 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4120 GET_MODE (op1));
4121 for (i = 0; i < 4; i++)
4123 switch (code)
4125 case AND:
4126 tmp0[i] &= tmp1[i];
4127 break;
4128 case IOR:
4129 tmp0[i] |= tmp1[i];
4130 break;
4131 case XOR:
4132 tmp0[i] ^= tmp1[i];
4133 break;
4134 default:
4135 gcc_unreachable ();
4138 real_from_target (&r, tmp0, mode);
4139 return const_double_from_real_value (r, mode);
4141 else
4143 REAL_VALUE_TYPE f0, f1, value, result;
4144 const REAL_VALUE_TYPE *opr0, *opr1;
4145 bool inexact;
4147 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4148 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4150 if (HONOR_SNANS (mode)
4151 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4152 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4153 return 0;
4155 real_convert (&f0, mode, opr0);
4156 real_convert (&f1, mode, opr1);
4158 if (code == DIV
4159 && real_equal (&f1, &dconst0)
4160 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4161 return 0;
4163 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4164 && flag_trapping_math
4165 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4167 int s0 = REAL_VALUE_NEGATIVE (f0);
4168 int s1 = REAL_VALUE_NEGATIVE (f1);
4170 switch (code)
4172 case PLUS:
4173 /* Inf + -Inf = NaN plus exception. */
4174 if (s0 != s1)
4175 return 0;
4176 break;
4177 case MINUS:
4178 /* Inf - Inf = NaN plus exception. */
4179 if (s0 == s1)
4180 return 0;
4181 break;
4182 case DIV:
4183 /* Inf / Inf = NaN plus exception. */
4184 return 0;
4185 default:
4186 break;
4190 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4191 && flag_trapping_math
4192 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4193 || (REAL_VALUE_ISINF (f1)
4194 && real_equal (&f0, &dconst0))))
4195 /* Inf * 0 = NaN plus exception. */
4196 return 0;
4198 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4199 &f0, &f1);
4200 real_convert (&result, mode, &value);
4202 /* Don't constant fold this floating point operation if
4203 the result has overflowed and flag_trapping_math is set. */
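/* E.g. adding DBL_MAX to itself would fold to +Inf even though both
operands are finite; with -ftrapping-math we leave the operation alone
so that the overflow exception can still be raised at run time. */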
4205 if (flag_trapping_math
4206 && MODE_HAS_INFINITIES (mode)
4207 && REAL_VALUE_ISINF (result)
4208 && !REAL_VALUE_ISINF (f0)
4209 && !REAL_VALUE_ISINF (f1))
4210 /* Overflow plus exception. */
4211 return 0;
4213 /* Don't constant fold this floating point operation if the
4214 result may depend upon the run-time rounding mode and
4215 flag_rounding_math is set, or if GCC's software emulation
4216 is unable to accurately represent the result. */
4218 if ((flag_rounding_math
4219 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4220 && (inexact || !real_identical (&result, &value)))
4221 return NULL_RTX;
4223 return const_double_from_real_value (result, mode);
4227 /* We can fold some multi-word operations. */
4228 scalar_int_mode int_mode;
4229 if (is_a <scalar_int_mode> (mode, &int_mode)
4230 && CONST_SCALAR_INT_P (op0)
4231 && CONST_SCALAR_INT_P (op1))
4233 wide_int result;
4234 wi::overflow_type overflow;
4235 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4236 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4238 #if TARGET_SUPPORTS_WIDE_INT == 0
4239 /* This assert keeps the simplification from producing a result
4240 that cannot be represented in a CONST_DOUBLE, but a lot of
4241 upstream callers expect that this function never fails to
4242 simplify something, so if you added this to the test
4243 above, the code would die later anyway. If this assert
4244 fires, you just need to make the port support wide int. */
4245 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4246 #endif
4247 switch (code)
4249 case MINUS:
4250 result = wi::sub (pop0, pop1);
4251 break;
4253 case PLUS:
4254 result = wi::add (pop0, pop1);
4255 break;
4257 case MULT:
4258 result = wi::mul (pop0, pop1);
4259 break;
4261 case DIV:
4262 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4263 if (overflow)
4264 return NULL_RTX;
4265 break;
4267 case MOD:
4268 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4269 if (overflow)
4270 return NULL_RTX;
4271 break;
4273 case UDIV:
4274 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4275 if (overflow)
4276 return NULL_RTX;
4277 break;
4279 case UMOD:
4280 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4281 if (overflow)
4282 return NULL_RTX;
4283 break;
4285 case AND:
4286 result = wi::bit_and (pop0, pop1);
4287 break;
4289 case IOR:
4290 result = wi::bit_or (pop0, pop1);
4291 break;
4293 case XOR:
4294 result = wi::bit_xor (pop0, pop1);
4295 break;
4297 case SMIN:
4298 result = wi::smin (pop0, pop1);
4299 break;
4301 case SMAX:
4302 result = wi::smax (pop0, pop1);
4303 break;
4305 case UMIN:
4306 result = wi::umin (pop0, pop1);
4307 break;
4309 case UMAX:
4310 result = wi::umax (pop0, pop1);
4311 break;
4313 case LSHIFTRT:
4314 case ASHIFTRT:
4315 case ASHIFT:
4317 wide_int wop1 = pop1;
4318 if (SHIFT_COUNT_TRUNCATED)
4319 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4320 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4321 return NULL_RTX;
4323 switch (code)
4325 case LSHIFTRT:
4326 result = wi::lrshift (pop0, wop1);
4327 break;
4329 case ASHIFTRT:
4330 result = wi::arshift (pop0, wop1);
4331 break;
4333 case ASHIFT:
4334 result = wi::lshift (pop0, wop1);
4335 break;
4337 default:
4338 gcc_unreachable ();
4340 break;
4342 case ROTATE:
4343 case ROTATERT:
4345 if (wi::neg_p (pop1))
4346 return NULL_RTX;
4348 switch (code)
4350 case ROTATE:
4351 result = wi::lrotate (pop0, pop1);
4352 break;
4354 case ROTATERT:
4355 result = wi::rrotate (pop0, pop1);
4356 break;
4358 default:
4359 gcc_unreachable ();
4361 break;
4363 default:
4364 return NULL_RTX;
4366 return immed_wide_int_const (result, int_mode);
4369 /* Handle polynomial integers. */
4370 if (NUM_POLY_INT_COEFFS > 1
4371 && is_a <scalar_int_mode> (mode, &int_mode)
4372 && poly_int_rtx_p (op0)
4373 && poly_int_rtx_p (op1))
4375 poly_wide_int result;
4376 switch (code)
4378 case PLUS:
4379 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4380 break;
4382 case MINUS:
4383 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4384 break;
4386 case MULT:
4387 if (CONST_SCALAR_INT_P (op1))
4388 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4389 else
4390 return NULL_RTX;
4391 break;
4393 case ASHIFT:
4394 if (CONST_SCALAR_INT_P (op1))
4396 wide_int shift = rtx_mode_t (op1, mode);
4397 if (SHIFT_COUNT_TRUNCATED)
4398 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4399 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4400 return NULL_RTX;
4401 result = wi::to_poly_wide (op0, mode) << shift;
4403 else
4404 return NULL_RTX;
4405 break;
4407 case IOR:
4408 if (!CONST_SCALAR_INT_P (op1)
4409 || !can_ior_p (wi::to_poly_wide (op0, mode),
4410 rtx_mode_t (op1, mode), &result))
4411 return NULL_RTX;
4412 break;
4414 default:
4415 return NULL_RTX;
4417 return immed_wide_int_const (result, int_mode);
4420 return NULL_RTX;
4425 /* Return a positive integer if X should sort after Y. The value
4426 returned is 1 if and only if X and Y are both regs. */
4428 static int
4429 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4431 int result;
4433 result = (commutative_operand_precedence (y)
4434 - commutative_operand_precedence (x));
4435 if (result)
4436 return result + result;
4438 /* Group together equal REGs to do more simplification. */
4439 if (REG_P (x) && REG_P (y))
4440 return REGNO (x) > REGNO (y);
4442 return 0;
4445 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4446 operands may be another PLUS or MINUS.
4448 Rather than test for specific cases, we do this by a brute-force method
4449 and do all possible simplifications until no more changes occur. Then
4450 we rebuild the operation.
4452 May return NULL_RTX when no changes were made. */
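/* Small worked example (operands chosen arbitrarily): simplifying
(minus (plus (reg A) (const_int 7)) (plus (reg A) (const_int 3)))
first flattens everything into the ops[] array as { +A, +7, -A, -3 };
the pairwise loop below folds +A with -A to 0 and +7 with -3 to 4,
and the rebuilt result is simply (const_int 4). */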
4454 static rtx
4455 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4456 rtx op1)
4458 struct simplify_plus_minus_op_data
4460 rtx op;
4461 short neg;
4462 } ops[16];
4463 rtx result, tem;
4464 int n_ops = 2;
4465 int changed, n_constants, canonicalized = 0;
4466 int i, j;
4468 memset (ops, 0, sizeof ops);
4470 /* Set up the two operands and then expand them until nothing has been
4471 changed. If we run out of room in our array, give up; this should
4472 almost never happen. */
4474 ops[0].op = op0;
4475 ops[0].neg = 0;
4476 ops[1].op = op1;
4477 ops[1].neg = (code == MINUS);
4481 changed = 0;
4482 n_constants = 0;
4484 for (i = 0; i < n_ops; i++)
4486 rtx this_op = ops[i].op;
4487 int this_neg = ops[i].neg;
4488 enum rtx_code this_code = GET_CODE (this_op);
4490 switch (this_code)
4492 case PLUS:
4493 case MINUS:
4494 if (n_ops == ARRAY_SIZE (ops))
4495 return NULL_RTX;
4497 ops[n_ops].op = XEXP (this_op, 1);
4498 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4499 n_ops++;
4501 ops[i].op = XEXP (this_op, 0);
4502 changed = 1;
4503 /* If this operand was negated then we will potentially
4504 canonicalize the expression. Similarly, if we don't
4505 place the operands adjacent to each other, we're re-ordering the
4506 expression and thus might be performing a
4507 canonicalization. Ignore register re-ordering.
4508 ??? It might be better to shuffle the ops array here,
4509 but then (plus (plus (A, B), plus (C, D))) wouldn't
4510 be seen as non-canonical. */
4511 if (this_neg
4512 || (i != n_ops - 2
4513 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4514 canonicalized = 1;
4515 break;
4517 case NEG:
4518 ops[i].op = XEXP (this_op, 0);
4519 ops[i].neg = ! this_neg;
4520 changed = 1;
4521 canonicalized = 1;
4522 break;
4524 case CONST:
4525 if (n_ops != ARRAY_SIZE (ops)
4526 && GET_CODE (XEXP (this_op, 0)) == PLUS
4527 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4528 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4530 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4531 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4532 ops[n_ops].neg = this_neg;
4533 n_ops++;
4534 changed = 1;
4535 canonicalized = 1;
4537 break;
4539 case NOT:
4540 /* ~a -> (-a - 1) */
4541 if (n_ops != ARRAY_SIZE (ops))
4543 ops[n_ops].op = CONSTM1_RTX (mode);
4544 ops[n_ops++].neg = this_neg;
4545 ops[i].op = XEXP (this_op, 0);
4546 ops[i].neg = !this_neg;
4547 changed = 1;
4548 canonicalized = 1;
4550 break;
4552 case CONST_INT:
4553 n_constants++;
4554 if (this_neg)
4556 ops[i].op = neg_const_int (mode, this_op);
4557 ops[i].neg = 0;
4558 changed = 1;
4559 canonicalized = 1;
4561 break;
4563 default:
4564 break;
4568 while (changed);
4570 if (n_constants > 1)
4571 canonicalized = 1;
4573 gcc_assert (n_ops >= 2);
4575 /* If we only have two operands, we can avoid the loops. */
4576 if (n_ops == 2)
4578 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4579 rtx lhs, rhs;
4581 /* Get the two operands. Be careful with the order, especially for
4582 the cases where code == MINUS. */
4583 if (ops[0].neg && ops[1].neg)
4585 lhs = gen_rtx_NEG (mode, ops[0].op);
4586 rhs = ops[1].op;
4588 else if (ops[0].neg)
4590 lhs = ops[1].op;
4591 rhs = ops[0].op;
4593 else
4595 lhs = ops[0].op;
4596 rhs = ops[1].op;
4599 return simplify_const_binary_operation (code, mode, lhs, rhs);
4602 /* Now simplify each pair of operands until nothing changes. */
4603 while (1)
4605 /* Insertion sort is good enough for a small array. */
4606 for (i = 1; i < n_ops; i++)
4608 struct simplify_plus_minus_op_data save;
4609 int cmp;
4611 j = i - 1;
4612 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4613 if (cmp <= 0)
4614 continue;
4615 /* Just swapping registers doesn't count as canonicalization. */
4616 if (cmp != 1)
4617 canonicalized = 1;
4619 save = ops[i];
4621 ops[j + 1] = ops[j];
4622 while (j--
4623 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4624 ops[j + 1] = save;
4627 changed = 0;
4628 for (i = n_ops - 1; i > 0; i--)
4629 for (j = i - 1; j >= 0; j--)
4631 rtx lhs = ops[j].op, rhs = ops[i].op;
4632 int lneg = ops[j].neg, rneg = ops[i].neg;
4634 if (lhs != 0 && rhs != 0)
4636 enum rtx_code ncode = PLUS;
4638 if (lneg != rneg)
4640 ncode = MINUS;
4641 if (lneg)
4642 std::swap (lhs, rhs);
4644 else if (swap_commutative_operands_p (lhs, rhs))
4645 std::swap (lhs, rhs);
4647 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4648 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4650 rtx tem_lhs, tem_rhs;
4652 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4653 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4654 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4655 tem_rhs);
4657 if (tem && !CONSTANT_P (tem))
4658 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4660 else
4661 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4663 if (tem)
4665 /* Reject "simplifications" that just wrap the two
4666 arguments in a CONST. Failure to do so can result
4667 in infinite recursion with simplify_binary_operation
4668 when it calls us to simplify CONST operations.
4669 Also, if we find such a simplification, don't try
4670 any more combinations with this rhs: We must have
4671 something like symbol+offset, i.e. one of the
4672 trivial CONST expressions we handle later. */
4673 if (GET_CODE (tem) == CONST
4674 && GET_CODE (XEXP (tem, 0)) == ncode
4675 && XEXP (XEXP (tem, 0), 0) == lhs
4676 && XEXP (XEXP (tem, 0), 1) == rhs)
4677 break;
4678 lneg &= rneg;
4679 if (GET_CODE (tem) == NEG)
4680 tem = XEXP (tem, 0), lneg = !lneg;
4681 if (CONST_INT_P (tem) && lneg)
4682 tem = neg_const_int (mode, tem), lneg = 0;
4684 ops[i].op = tem;
4685 ops[i].neg = lneg;
4686 ops[j].op = NULL_RTX;
4687 changed = 1;
4688 canonicalized = 1;
4693 if (!changed)
4694 break;
4696 /* Pack all the operands to the lower-numbered entries. */
4697 for (i = 0, j = 0; j < n_ops; j++)
4698 if (ops[j].op)
4700 ops[i] = ops[j];
4701 i++;
4703 n_ops = i;
4706 /* If nothing changed, check whether rematerialization of the rtl
4707 instructions is still required. */
4708 if (!canonicalized)
4710 /* Perform rematerialization only if all operands are registers and
4711 all operations are PLUS. */
4712 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4713 around rs6000 and how it uses the CA register. See PR67145. */
4714 for (i = 0; i < n_ops; i++)
4715 if (ops[i].neg
4716 || !REG_P (ops[i].op)
4717 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4718 && fixed_regs[REGNO (ops[i].op)]
4719 && !global_regs[REGNO (ops[i].op)]
4720 && ops[i].op != frame_pointer_rtx
4721 && ops[i].op != arg_pointer_rtx
4722 && ops[i].op != stack_pointer_rtx))
4723 return NULL_RTX;
4724 goto gen_result;
4727 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4728 if (n_ops == 2
4729 && CONST_INT_P (ops[1].op)
4730 && CONSTANT_P (ops[0].op)
4731 && ops[0].neg)
4732 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4734 /* We suppressed creation of trivial CONST expressions in the
4735 combination loop to avoid recursion. Create one manually now.
4736 The combination loop should have ensured that there is exactly
4737 one CONST_INT, and the sort will have ensured that it is last
4738 in the array and that any other constant will be next-to-last. */
4740 if (n_ops > 1
4741 && CONST_INT_P (ops[n_ops - 1].op)
4742 && CONSTANT_P (ops[n_ops - 2].op))
4744 rtx value = ops[n_ops - 1].op;
4745 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4746 value = neg_const_int (mode, value);
4747 if (CONST_INT_P (value))
4749 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4750 INTVAL (value));
4751 n_ops--;
4755 /* Put a non-negated operand first, if possible. */
4757 for (i = 0; i < n_ops && ops[i].neg; i++)
4758 continue;
4759 if (i == n_ops)
4760 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4761 else if (i != 0)
4763 tem = ops[0].op;
4764 ops[0] = ops[i];
4765 ops[i].op = tem;
4766 ops[i].neg = 1;
4769 /* Now make the result by performing the requested operations. */
4770 gen_result:
4771 result = ops[0].op;
4772 for (i = 1; i < n_ops; i++)
4773 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4774 mode, result, ops[i].op);
4776 return result;
4779 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4780 static bool
4781 plus_minus_operand_p (const_rtx x)
4783 return GET_CODE (x) == PLUS
4784 || GET_CODE (x) == MINUS
4785 || (GET_CODE (x) == CONST
4786 && GET_CODE (XEXP (x, 0)) == PLUS
4787 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4788 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4791 /* Like simplify_binary_operation except used for relational operators.
4792 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4793 not both be VOIDmode.
4795 CMP_MODE specifies the mode in which the comparison is done, so it is
4796 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4797 the operands or, if both are VOIDmode, the operands are compared in
4798 "infinite precision". */
4800 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4801 machine_mode cmp_mode, rtx op0, rtx op1)
4803 rtx tem, trueop0, trueop1;
4805 if (cmp_mode == VOIDmode)
4806 cmp_mode = GET_MODE (op0);
4807 if (cmp_mode == VOIDmode)
4808 cmp_mode = GET_MODE (op1);
4810 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4811 if (tem)
4813 if (SCALAR_FLOAT_MODE_P (mode))
4815 if (tem == const0_rtx)
4816 return CONST0_RTX (mode);
4817 #ifdef FLOAT_STORE_FLAG_VALUE
4819 REAL_VALUE_TYPE val;
4820 val = FLOAT_STORE_FLAG_VALUE (mode);
4821 return const_double_from_real_value (val, mode);
4823 #else
4824 return NULL_RTX;
4825 #endif
4827 if (VECTOR_MODE_P (mode))
4829 if (tem == const0_rtx)
4830 return CONST0_RTX (mode);
4831 #ifdef VECTOR_STORE_FLAG_VALUE
4833 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4834 if (val == NULL_RTX)
4835 return NULL_RTX;
4836 if (val == const1_rtx)
4837 return CONST1_RTX (mode);
4839 return gen_const_vec_duplicate (mode, val);
4841 #else
4842 return NULL_RTX;
4843 #endif
4846 return tem;
4849 /* For the following tests, ensure const0_rtx is op1. */
4850 if (swap_commutative_operands_p (op0, op1)
4851 || (op0 == const0_rtx && op1 != const0_rtx))
4852 std::swap (op0, op1), code = swap_condition (code);
4854 /* If op0 is a compare, extract the comparison arguments from it. */
4855 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4856 return simplify_gen_relational (code, mode, VOIDmode,
4857 XEXP (op0, 0), XEXP (op0, 1));
4859 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4860 || CC0_P (op0))
4861 return NULL_RTX;
4863 trueop0 = avoid_constant_pool_reference (op0);
4864 trueop1 = avoid_constant_pool_reference (op1);
4865 return simplify_relational_operation_1 (code, mode, cmp_mode,
4866 trueop0, trueop1);
4869 /* This part of simplify_relational_operation is only used when CMP_MODE
4870 is not in class MODE_CC (i.e. it is a real comparison).
4872 MODE is the mode of the result, while CMP_MODE specifies the mode
4873 in which the comparison is done, so it is the mode of the operands. */
4875 static rtx
4876 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4877 machine_mode cmp_mode, rtx op0, rtx op1)
4879 enum rtx_code op0code = GET_CODE (op0);
4881 if (op1 == const0_rtx && COMPARISON_P (op0))
4883 /* If op0 is a comparison, extract the comparison arguments
4884 from it. */
4885 if (code == NE)
4887 if (GET_MODE (op0) == mode)
4888 return simplify_rtx (op0);
4889 else
4890 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4891 XEXP (op0, 0), XEXP (op0, 1));
4893 else if (code == EQ)
4895 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4896 if (new_code != UNKNOWN)
4897 return simplify_gen_relational (new_code, mode, VOIDmode,
4898 XEXP (op0, 0), XEXP (op0, 1));
4902 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4903 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
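/* Numeric sanity check (any unsigned width, say 32 bits, C = 3):
(ltu (plus a 3) 3) asks whether a + 3 wrapped around, which happens
exactly when a > 0xffffffff - 3, i.e. when (geu a -3) holds with -3
read as the unsigned value 0xfffffffd. Hence the LTU/GEU swap and the
negated constant below. */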
4904 if ((code == LTU || code == GEU)
4905 && GET_CODE (op0) == PLUS
4906 && CONST_INT_P (XEXP (op0, 1))
4907 && (rtx_equal_p (op1, XEXP (op0, 0))
4908 || rtx_equal_p (op1, XEXP (op0, 1)))
4909 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4910 && XEXP (op0, 1) != const0_rtx)
4912 rtx new_cmp
4913 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4914 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4915 cmp_mode, XEXP (op0, 0), new_cmp);
4918 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4919 transformed into (LTU a -C). */
4920 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4921 && CONST_INT_P (XEXP (op0, 1))
4922 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4923 && XEXP (op0, 1) != const0_rtx)
4925 rtx new_cmp
4926 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4927 return simplify_gen_relational (LTU, mode, cmp_mode,
4928 XEXP (op0, 0), new_cmp);
4931 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4932 if ((code == LTU || code == GEU)
4933 && GET_CODE (op0) == PLUS
4934 && rtx_equal_p (op1, XEXP (op0, 1))
4935 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4936 && !rtx_equal_p (op1, XEXP (op0, 0)))
4937 return simplify_gen_relational (code, mode, cmp_mode, op0,
4938 copy_rtx (XEXP (op0, 0)));
4940 if (op1 == const0_rtx)
4942 /* Canonicalize (GTU x 0) as (NE x 0). */
4943 if (code == GTU)
4944 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4945 /* Canonicalize (LEU x 0) as (EQ x 0). */
4946 if (code == LEU)
4947 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4949 else if (op1 == const1_rtx)
4951 switch (code)
4953 case GE:
4954 /* Canonicalize (GE x 1) as (GT x 0). */
4955 return simplify_gen_relational (GT, mode, cmp_mode,
4956 op0, const0_rtx);
4957 case GEU:
4958 /* Canonicalize (GEU x 1) as (NE x 0). */
4959 return simplify_gen_relational (NE, mode, cmp_mode,
4960 op0, const0_rtx);
4961 case LT:
4962 /* Canonicalize (LT x 1) as (LE x 0). */
4963 return simplify_gen_relational (LE, mode, cmp_mode,
4964 op0, const0_rtx);
4965 case LTU:
4966 /* Canonicalize (LTU x 1) as (EQ x 0). */
4967 return simplify_gen_relational (EQ, mode, cmp_mode,
4968 op0, const0_rtx);
4969 default:
4970 break;
4973 else if (op1 == constm1_rtx)
4975 /* Canonicalize (LE x -1) as (LT x 0). */
4976 if (code == LE)
4977 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4978 /* Canonicalize (GT x -1) as (GE x 0). */
4979 if (code == GT)
4980 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4983 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
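/* E.g. (eq (plus x (const_int 3)) (const_int 10)) becomes
(eq x (const_int 7)); for a MINUS the inverse operation is a PLUS, so
(eq (minus x (const_int 3)) (const_int 10)) becomes
(eq x (const_int 13)). As checked below, this is only done for
integral modes or with -funsafe-math-optimizations. */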
4984 if ((code == EQ || code == NE)
4985 && (op0code == PLUS || op0code == MINUS)
4986 && CONSTANT_P (op1)
4987 && CONSTANT_P (XEXP (op0, 1))
4988 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4990 rtx x = XEXP (op0, 0);
4991 rtx c = XEXP (op0, 1);
4992 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4993 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4995 /* Detect an infinite recursive condition, where we oscillate at this
4996 simplification case between:
4997 A + B == C <---> C - B == A,
4998 where A, B, and C are all constants with non-simplifiable expressions,
4999 usually SYMBOL_REFs. */
5000 if (GET_CODE (tem) == invcode
5001 && CONSTANT_P (x)
5002 && rtx_equal_p (c, XEXP (tem, 1)))
5003 return NULL_RTX;
5005 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5008 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5009 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5010 scalar_int_mode int_mode, int_cmp_mode;
5011 if (code == NE
5012 && op1 == const0_rtx
5013 && is_int_mode (mode, &int_mode)
5014 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5015 /* ??? Work-around BImode bugs in the ia64 backend. */
5016 && int_mode != BImode
5017 && int_cmp_mode != BImode
5018 && nonzero_bits (op0, int_cmp_mode) == 1
5019 && STORE_FLAG_VALUE == 1)
5020 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5021 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5022 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5024 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5025 if ((code == EQ || code == NE)
5026 && op1 == const0_rtx
5027 && op0code == XOR)
5028 return simplify_gen_relational (code, mode, cmp_mode,
5029 XEXP (op0, 0), XEXP (op0, 1));
5031 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5032 if ((code == EQ || code == NE)
5033 && op0code == XOR
5034 && rtx_equal_p (XEXP (op0, 0), op1)
5035 && !side_effects_p (XEXP (op0, 0)))
5036 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5037 CONST0_RTX (mode));
5039 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5040 if ((code == EQ || code == NE)
5041 && op0code == XOR
5042 && rtx_equal_p (XEXP (op0, 1), op1)
5043 && !side_effects_p (XEXP (op0, 1)))
5044 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5045 CONST0_RTX (mode));
5047 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5048 if ((code == EQ || code == NE)
5049 && op0code == XOR
5050 && CONST_SCALAR_INT_P (op1)
5051 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5052 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5053 simplify_gen_binary (XOR, cmp_mode,
5054 XEXP (op0, 1), op1));
5056 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction,
5057 or to allow constant folding if x/y is a constant. */
5058 if ((code == EQ || code == NE)
5059 && (op0code == AND || op0code == IOR)
5060 && !side_effects_p (op1)
5061 && op1 != CONST0_RTX (cmp_mode))
5063 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5064 (eq/ne (and (not y) x) 0). */
5065 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5066 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5068 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5069 cmp_mode);
5070 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5072 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5073 CONST0_RTX (cmp_mode));
5076 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5077 (eq/ne (and (not x) y) 0). */
5078 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5079 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5081 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5082 cmp_mode);
5083 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5085 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5086 CONST0_RTX (cmp_mode));
5090 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5091 if ((code == EQ || code == NE)
5092 && GET_CODE (op0) == BSWAP
5093 && CONST_SCALAR_INT_P (op1))
5094 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5095 simplify_gen_unary (BSWAP, cmp_mode,
5096 op1, cmp_mode));
5098 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5099 if ((code == EQ || code == NE)
5100 && GET_CODE (op0) == BSWAP
5101 && GET_CODE (op1) == BSWAP)
5102 return simplify_gen_relational (code, mode, cmp_mode,
5103 XEXP (op0, 0), XEXP (op1, 0));
5105 if (op0code == POPCOUNT && op1 == const0_rtx)
5106 switch (code)
5108 case EQ:
5109 case LE:
5110 case LEU:
5111 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5112 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5113 XEXP (op0, 0), const0_rtx);
5115 case NE:
5116 case GT:
5117 case GTU:
5118 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5119 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5120 XEXP (op0, 0), const0_rtx);
5122 default:
5123 break;
5126 return NULL_RTX;
5129 enum
5131 CMP_EQ = 1,
5132 CMP_LT = 2,
5133 CMP_GT = 4,
5134 CMP_LTU = 8,
5135 CMP_GTU = 16
5139 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5140 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5141 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5142 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5143 For floating-point comparisons, assume that the operands were ordered. */
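/* For example, KNOWN_RESULTS == (CMP_LT | CMP_GTU) describes operands
where op0 < op1 as signed values but op0 > op1 as unsigned values
(e.g. -1 compared with 1); comparison_result then returns
const_true_rtx for LT or GTU and const0_rtx for GE, EQ or LEU. */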
5145 static rtx
5146 comparison_result (enum rtx_code code, int known_results)
5148 switch (code)
5150 case EQ:
5151 case UNEQ:
5152 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5153 case NE:
5154 case LTGT:
5155 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5157 case LT:
5158 case UNLT:
5159 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5160 case GE:
5161 case UNGE:
5162 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5164 case GT:
5165 case UNGT:
5166 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5167 case LE:
5168 case UNLE:
5169 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5171 case LTU:
5172 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5173 case GEU:
5174 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5176 case GTU:
5177 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5178 case LEU:
5179 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5181 case ORDERED:
5182 return const_true_rtx;
5183 case UNORDERED:
5184 return const0_rtx;
5185 default:
5186 gcc_unreachable ();
5190 /* Check if the given comparison (done in the given MODE) is actually
5191 a tautology or a contradiction. If the mode is VOIDmode, the
5192 comparison is done in "infinite precision". If no simplification
5193 is possible, this function returns zero. Otherwise, it returns
5194 either const_true_rtx or const0_rtx. */
5197 simplify_const_relational_operation (enum rtx_code code,
5198 machine_mode mode,
5199 rtx op0, rtx op1)
5201 rtx tem;
5202 rtx trueop0;
5203 rtx trueop1;
5205 gcc_assert (mode != VOIDmode
5206 || (GET_MODE (op0) == VOIDmode
5207 && GET_MODE (op1) == VOIDmode));
5209 /* If op0 is a compare, extract the comparison arguments from it. */
5210 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5212 op1 = XEXP (op0, 1);
5213 op0 = XEXP (op0, 0);
5215 if (GET_MODE (op0) != VOIDmode)
5216 mode = GET_MODE (op0);
5217 else if (GET_MODE (op1) != VOIDmode)
5218 mode = GET_MODE (op1);
5219 else
5220 return 0;
5223 /* We can't simplify MODE_CC values since we don't know what the
5224 actual comparison is. */
5225 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5226 return 0;
5228 /* Make sure the constant is second. */
5229 if (swap_commutative_operands_p (op0, op1))
5231 std::swap (op0, op1);
5232 code = swap_condition (code);
5235 trueop0 = avoid_constant_pool_reference (op0);
5236 trueop1 = avoid_constant_pool_reference (op1);
5238 /* For integer comparisons of A and B maybe we can simplify A - B and can
5239 then simplify a comparison of that with zero. If A and B are both either
5240 a register or a CONST_INT, this can't help; testing for these cases will
5241 prevent infinite recursion here and speed things up.
5243 We can only do this for EQ and NE comparisons as otherwise we may
5244 lose or introduce overflow which we cannot disregard as undefined, since
5245 we do not know the signedness of the operation on either the left or
5246 the right hand side of the comparison. */
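/* For instance, comparing (plus (reg r) (const_int 4)) with
(plus (reg r) (const_int 6)) for EQ: the MINUS of the two operands
simplifies to (const_int -2), and comparing -2 against zero lets the
whole comparison fold to const0_rtx. */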
5248 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5249 && (code == EQ || code == NE)
5250 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5251 && (REG_P (op1) || CONST_INT_P (trueop1)))
5252 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5253 /* We cannot do this if tem is a nonzero address. */
5254 && ! nonzero_address_p (tem))
5255 return simplify_const_relational_operation (signed_condition (code),
5256 mode, tem, const0_rtx);
5258 if (! HONOR_NANS (mode) && code == ORDERED)
5259 return const_true_rtx;
5261 if (! HONOR_NANS (mode) && code == UNORDERED)
5262 return const0_rtx;
5264 /* For modes without NaNs, if the two operands are equal, we know the
5265 result except if they have side-effects. Even with NaNs we know
5266 the result of unordered comparisons and, if signaling NaNs are
5267 irrelevant, also the result of LT/GT/LTGT. */
5268 if ((! HONOR_NANS (trueop0)
5269 || code == UNEQ || code == UNLE || code == UNGE
5270 || ((code == LT || code == GT || code == LTGT)
5271 && ! HONOR_SNANS (trueop0)))
5272 && rtx_equal_p (trueop0, trueop1)
5273 && ! side_effects_p (trueop0))
5274 return comparison_result (code, CMP_EQ);
5276 /* If the operands are floating-point constants, see if we can fold
5277 the result. */
5278 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5279 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5280 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5282 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5283 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5285 /* Comparisons are unordered iff at least one of the values is NaN. */
5286 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5287 switch (code)
5289 case UNEQ:
5290 case UNLT:
5291 case UNGT:
5292 case UNLE:
5293 case UNGE:
5294 case NE:
5295 case UNORDERED:
5296 return const_true_rtx;
5297 case EQ:
5298 case LT:
5299 case GT:
5300 case LE:
5301 case GE:
5302 case LTGT:
5303 case ORDERED:
5304 return const0_rtx;
5305 default:
5306 return 0;
5309 return comparison_result (code,
5310 (real_equal (d0, d1) ? CMP_EQ :
5311 real_less (d0, d1) ? CMP_LT : CMP_GT));
5314 /* Otherwise, see if the operands are both integers. */
5315 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5316 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5318 /* It would be nice if we really had a mode here. However, the
5319 largest int representable on the target is as good as
5320 infinite. */
5321 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5322 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5323 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5325 if (wi::eq_p (ptrueop0, ptrueop1))
5326 return comparison_result (code, CMP_EQ);
5327 else
5329 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5330 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5331 return comparison_result (code, cr);
5335 /* Optimize comparisons with upper and lower bounds. */
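/* Example of the idea: if nonzero_bits shows that only the low eight bits
of trueop0 can ever be set, then mmin = 0 and mmax = 255, so
(gtu x (const_int 255)) folds to const0_rtx and
(leu x (const_int 255)) folds to const_true_rtx without knowing x. */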
5336 scalar_int_mode int_mode;
5337 if (CONST_INT_P (trueop1)
5338 && is_a <scalar_int_mode> (mode, &int_mode)
5339 && HWI_COMPUTABLE_MODE_P (int_mode)
5340 && !side_effects_p (trueop0))
5342 int sign;
5343 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5344 HOST_WIDE_INT val = INTVAL (trueop1);
5345 HOST_WIDE_INT mmin, mmax;
5347 if (code == GEU
5348 || code == LEU
5349 || code == GTU
5350 || code == LTU)
5351 sign = 0;
5352 else
5353 sign = 1;
5355 /* Get a reduced range if the sign bit is zero. */
5356 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5358 mmin = 0;
5359 mmax = nonzero;
5361 else
5363 rtx mmin_rtx, mmax_rtx;
5364 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5366 mmin = INTVAL (mmin_rtx);
5367 mmax = INTVAL (mmax_rtx);
5368 if (sign)
5370 unsigned int sign_copies
5371 = num_sign_bit_copies (trueop0, int_mode);
5373 mmin >>= (sign_copies - 1);
5374 mmax >>= (sign_copies - 1);
5378 switch (code)
5380 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5381 case GEU:
5382 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5383 return const_true_rtx;
5384 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5385 return const0_rtx;
5386 break;
5387 case GE:
5388 if (val <= mmin)
5389 return const_true_rtx;
5390 if (val > mmax)
5391 return const0_rtx;
5392 break;
5394 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5395 case LEU:
5396 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5397 return const_true_rtx;
5398 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5399 return const0_rtx;
5400 break;
5401 case LE:
5402 if (val >= mmax)
5403 return const_true_rtx;
5404 if (val < mmin)
5405 return const0_rtx;
5406 break;
5408 case EQ:
5409 /* x == y is always false for y out of range. */
5410 if (val < mmin || val > mmax)
5411 return const0_rtx;
5412 break;
5414 /* x > y is always false for y >= mmax, always true for y < mmin. */
5415 case GTU:
5416 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5417 return const0_rtx;
5418 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5419 return const_true_rtx;
5420 break;
5421 case GT:
5422 if (val >= mmax)
5423 return const0_rtx;
5424 if (val < mmin)
5425 return const_true_rtx;
5426 break;
5428 /* x < y is always false for y <= mmin, always true for y > mmax. */
5429 case LTU:
5430 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5431 return const0_rtx;
5432 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5433 return const_true_rtx;
5434 break;
5435 case LT:
5436 if (val <= mmin)
5437 return const0_rtx;
5438 if (val > mmax)
5439 return const_true_rtx;
5440 break;
5442 case NE:
5443 /* x != y is always true for y out of range. */
5444 if (val < mmin || val > mmax)
5445 return const_true_rtx;
5446 break;
5448 default:
5449 break;
5453 /* Optimize integer comparisons with zero. */
5454 if (is_a <scalar_int_mode> (mode, &int_mode)
5455 && trueop1 == const0_rtx
5456 && !side_effects_p (trueop0))
5458 /* Some addresses are known to be nonzero. We don't know
5459 their sign, but equality comparisons are known. */
5460 if (nonzero_address_p (trueop0))
5462 if (code == EQ || code == LEU)
5463 return const0_rtx;
5464 if (code == NE || code == GTU)
5465 return const_true_rtx;
5468 /* See if the first operand is an IOR with a constant. If so, we
5469 may be able to determine the result of this comparison. */
5470 if (GET_CODE (op0) == IOR)
5472 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5473 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5475 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5476 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5477 && (UINTVAL (inner_const)
5478 & (HOST_WIDE_INT_1U
5479 << sign_bitnum)));
5481 switch (code)
5483 case EQ:
5484 case LEU:
5485 return const0_rtx;
5486 case NE:
5487 case GTU:
5488 return const_true_rtx;
5489 case LT:
5490 case LE:
5491 if (has_sign)
5492 return const_true_rtx;
5493 break;
5494 case GT:
5495 case GE:
5496 if (has_sign)
5497 return const0_rtx;
5498 break;
5499 default:
5500 break;
5506 /* Optimize comparison of ABS with zero. */
5507 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5508 && (GET_CODE (trueop0) == ABS
5509 || (GET_CODE (trueop0) == FLOAT_EXTEND
5510 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5512 switch (code)
5514 case LT:
5515 /* Optimize abs(x) < 0.0. */
5516 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5517 return const0_rtx;
5518 break;
5520 case GE:
5521 /* Optimize abs(x) >= 0.0. */
5522 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5523 return const_true_rtx;
5524 break;
5526 case UNGE:
5527 /* Optimize ! (abs(x) < 0.0). */
5528 return const_true_rtx;
5530 default:
5531 break;
5535 return 0;
5538 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5539 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5540 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the
5541 expression can be simplified to that, or NULL_RTX if not.
5542 Assume X is compared against zero with CMP_CODE and the true
5543 arm is TRUE_VAL and the false arm is FALSE_VAL. */
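/* Typical use (hypothetical 32-bit mode where CLZ_DEFINED_VALUE_AT_ZERO
yields 32): the RTL for "x == 0 ? 32 : __builtin_clz (x)" has
TRUE_VAL = (const_int 32) and FALSE_VAL = (clz:SI (reg:SI x)); since
CLZ already yields 32 at zero on such a target, the conditional is
redundant and (clz:SI (reg:SI x)) can be returned unconditionally. */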
5545 static rtx
5546 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5548 if (cmp_code != EQ && cmp_code != NE)
5549 return NULL_RTX;
5551 /* Result on X == 0 and X != 0 respectively. */
5552 rtx on_zero, on_nonzero;
5553 if (cmp_code == EQ)
5555 on_zero = true_val;
5556 on_nonzero = false_val;
5558 else
5560 on_zero = false_val;
5561 on_nonzero = true_val;
5564 rtx_code op_code = GET_CODE (on_nonzero);
5565 if ((op_code != CLZ && op_code != CTZ)
5566 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5567 || !CONST_INT_P (on_zero))
5568 return NULL_RTX;
5570 HOST_WIDE_INT op_val;
5571 scalar_int_mode mode ATTRIBUTE_UNUSED
5572 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5573 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5574 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5575 && op_val == INTVAL (on_zero))
5576 return on_nonzero;
5578 return NULL_RTX;
5581 /* Try to simplify X given that it appears within operand OP of a
5582 VEC_MERGE operation whose mask is MASK. X need not use the same
5583 vector mode as the VEC_MERGE, but it must have the same number of
5584 elements.
5586 Return the simplified X on success, otherwise return NULL_RTX. */
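/* Sketch of what this enables (mask M and vectors A, B, C, D arbitrary):
within operand 0 of a (vec_merge ... ... M), the subexpression
(vec_merge A B M) can be replaced by A, because only the lanes that M
selects from operand 0 survive anyway. So in
(vec_merge (plus (vec_merge A B M) C) D M)
the first operand simplifies to (plus A C), provided B has no side
effects. */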
5589 simplify_merge_mask (rtx x, rtx mask, int op)
5591 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
5592 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
5593 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
5595 if (side_effects_p (XEXP (x, 1 - op)))
5596 return NULL_RTX;
5598 return XEXP (x, op);
5600 if (UNARY_P (x)
5601 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5602 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
5604 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5605 if (top0)
5606 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
5607 GET_MODE (XEXP (x, 0)));
5609 if (BINARY_P (x)
5610 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5611 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5612 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5613 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
5615 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5616 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5617 if (top0 || top1)
5618 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
5619 top0 ? top0 : XEXP (x, 0),
5620 top1 ? top1 : XEXP (x, 1));
5622 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
5623 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
5624 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
5625 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
5626 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
5627 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
5628 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
5630 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
5631 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
5632 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
5633 if (top0 || top1 || top2)
5634 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
5635 GET_MODE (XEXP (x, 0)),
5636 top0 ? top0 : XEXP (x, 0),
5637 top1 ? top1 : XEXP (x, 1),
5638 top2 ? top2 : XEXP (x, 2));
5640 return NULL_RTX;
5644 /* Simplify CODE, an operation with result mode MODE and three operands,
5645 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5646 a constant. Return 0 if no simplification is possible. */
5648 rtx
5649 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5650 machine_mode op0_mode, rtx op0, rtx op1,
5651 rtx op2)
5653 bool any_change = false;
5654 rtx tem, trueop2;
5655 scalar_int_mode int_mode, int_op0_mode;
5656 unsigned int n_elts;
5658 switch (code)
5660 case FMA:
5661 /* Simplify negations around the multiplication. */
5662 /* -a * -b + c => a * b + c. */
5663 if (GET_CODE (op0) == NEG)
5665 tem = simplify_unary_operation (NEG, mode, op1, mode);
5666 if (tem)
5667 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5669 else if (GET_CODE (op1) == NEG)
5671 tem = simplify_unary_operation (NEG, mode, op0, mode);
5672 if (tem)
5673 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5676 /* Canonicalize the two multiplication operands. */
5677 /* a * -b + c => -b * a + c. */
5678 if (swap_commutative_operands_p (op0, op1))
5679 std::swap (op0, op1), any_change = true;
5681 if (any_change)
5682 return gen_rtx_FMA (mode, op0, op1, op2);
5683 return NULL_RTX;
5685 case SIGN_EXTRACT:
5686 case ZERO_EXTRACT:
5687 if (CONST_INT_P (op0)
5688 && CONST_INT_P (op1)
5689 && CONST_INT_P (op2)
5690 && is_a <scalar_int_mode> (mode, &int_mode)
5691 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5692 && HWI_COMPUTABLE_MODE_P (int_mode))
5694 /* Extracting a bit-field from a constant */
5695 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5696 HOST_WIDE_INT op1val = INTVAL (op1);
5697 HOST_WIDE_INT op2val = INTVAL (op2);
5698 if (!BITS_BIG_ENDIAN)
5699 val >>= op2val;
5700 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5701 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5702 else
5703 /* Not enough information to calculate the bit position. */
5704 break;
5706 if (HOST_BITS_PER_WIDE_INT != op1val)
5708 /* First zero-extend. */
5709 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5710 /* If desired, propagate sign bit. */
5711 if (code == SIGN_EXTRACT
5712 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5713 != 0)
5714 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5717 return gen_int_mode (val, int_mode);
5719 break;
5721 case IF_THEN_ELSE:
5722 if (CONST_INT_P (op0))
5723 return op0 != const0_rtx ? op1 : op2;
5725 /* Convert c ? a : a into "a". */
5726 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5727 return op1;
5729 /* Convert a != b ? a : b into "a". */
5730 if (GET_CODE (op0) == NE
5731 && ! side_effects_p (op0)
5732 && ! HONOR_NANS (mode)
5733 && ! HONOR_SIGNED_ZEROS (mode)
5734 && ((rtx_equal_p (XEXP (op0, 0), op1)
5735 && rtx_equal_p (XEXP (op0, 1), op2))
5736 || (rtx_equal_p (XEXP (op0, 0), op2)
5737 && rtx_equal_p (XEXP (op0, 1), op1))))
5738 return op1;
5740 /* Convert a == b ? a : b into "b". */
5741 if (GET_CODE (op0) == EQ
5742 && ! side_effects_p (op0)
5743 && ! HONOR_NANS (mode)
5744 && ! HONOR_SIGNED_ZEROS (mode)
5745 && ((rtx_equal_p (XEXP (op0, 0), op1)
5746 && rtx_equal_p (XEXP (op0, 1), op2))
5747 || (rtx_equal_p (XEXP (op0, 0), op2)
5748 && rtx_equal_p (XEXP (op0, 1), op1))))
5749 return op2;
5751 /* Convert (!c) != {0,...,0} ? a : b into
5752 c != {0,...,0} ? b : a for vector modes. */
5753 if (VECTOR_MODE_P (GET_MODE (op1))
5754 && GET_CODE (op0) == NE
5755 && GET_CODE (XEXP (op0, 0)) == NOT
5756 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5758 rtx cv = XEXP (op0, 1);
5759 int nunits;
5760 bool ok = true;
5761 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
5762 ok = false;
5763 else
5764 for (int i = 0; i < nunits; ++i)
5765 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5767 ok = false;
5768 break;
5770 if (ok)
5772 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5773 XEXP (XEXP (op0, 0), 0),
5774 XEXP (op0, 1));
5775 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5776 return retval;
5780 /* Convert x == 0 ? N : clz (x) into clz (x) when
5781 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5782 Similarly for ctz (x). */
5783 if (COMPARISON_P (op0) && !side_effects_p (op0)
5784 && XEXP (op0, 1) == const0_rtx)
5786 rtx simplified
5787 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5788 op1, op2);
5789 if (simplified)
5790 return simplified;
5793 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5795 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5796 ? GET_MODE (XEXP (op0, 1))
5797 : GET_MODE (XEXP (op0, 0)));
5798 rtx temp;
5800 /* Look for happy constants in op1 and op2 (a STORE_FLAG_VALUE / 0 pair of arms). */
5801 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5803 HOST_WIDE_INT t = INTVAL (op1);
5804 HOST_WIDE_INT f = INTVAL (op2);
5806 if (t == STORE_FLAG_VALUE && f == 0)
5807 code = GET_CODE (op0);
5808 else if (t == 0 && f == STORE_FLAG_VALUE)
5810 enum rtx_code tmp;
5811 tmp = reversed_comparison_code (op0, NULL);
5812 if (tmp == UNKNOWN)
5813 break;
5814 code = tmp;
5816 else
5817 break;
5819 return simplify_gen_relational (code, mode, cmp_mode,
5820 XEXP (op0, 0), XEXP (op0, 1));
5823 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5824 cmp_mode, XEXP (op0, 0),
5825 XEXP (op0, 1));
5827 /* See if any simplifications were possible. */
5828 if (temp)
5830 if (CONST_INT_P (temp))
5831 return temp == const0_rtx ? op2 : op1;
5832 else
5833 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5836 break;
5838 case VEC_MERGE:
5839 gcc_assert (GET_MODE (op0) == mode);
5840 gcc_assert (GET_MODE (op1) == mode);
5841 gcc_assert (VECTOR_MODE_P (mode));
5842 trueop2 = avoid_constant_pool_reference (op2);
5843 if (CONST_INT_P (trueop2)
5844 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
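          /* Bit I of SEL chooses element I from op0 when set and from op1
             when clear; MASK restricts the test to the N_ELTS valid bits.  */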
5846 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5847 unsigned HOST_WIDE_INT mask;
5848 if (n_elts == HOST_BITS_PER_WIDE_INT)
5849 mask = -1;
5850 else
5851 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5853 if (!(sel & mask) && !side_effects_p (op0))
5854 return op1;
5855 if ((sel & mask) == mask && !side_effects_p (op1))
5856 return op0;
5858 rtx trueop0 = avoid_constant_pool_reference (op0);
5859 rtx trueop1 = avoid_constant_pool_reference (op1);
5860 if (GET_CODE (trueop0) == CONST_VECTOR
5861 && GET_CODE (trueop1) == CONST_VECTOR)
5863 rtvec v = rtvec_alloc (n_elts);
5864 unsigned int i;
5866 for (i = 0; i < n_elts; i++)
5867 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5868 ? CONST_VECTOR_ELT (trueop0, i)
5869 : CONST_VECTOR_ELT (trueop1, i));
5870 return gen_rtx_CONST_VECTOR (mode, v);
5873 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5874 if no element from a appears in the result. */
5875 if (GET_CODE (op0) == VEC_MERGE)
5877 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5878 if (CONST_INT_P (tem))
5880 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5881 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5882 return simplify_gen_ternary (code, mode, mode,
5883 XEXP (op0, 1), op1, op2);
5884 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5885 return simplify_gen_ternary (code, mode, mode,
5886 XEXP (op0, 0), op1, op2);
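      /* Likewise replace (vec_merge a (vec_merge b c n) m) with
         (vec_merge a c m) if no element from b appears in the result,
         or with (vec_merge a b m) if no element from c does.  */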
5889 if (GET_CODE (op1) == VEC_MERGE)
5891 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5892 if (CONST_INT_P (tem))
5894 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5895 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5896 return simplify_gen_ternary (code, mode, mode,
5897 op0, XEXP (op1, 1), op2);
5898 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5899 return simplify_gen_ternary (code, mode, mode,
5900 op0, XEXP (op1, 0), op2);
5904 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5905 with a. */
5906 if (GET_CODE (op0) == VEC_DUPLICATE
5907 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5908 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5909 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
5911 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5912 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5914 if (XEXP (XEXP (op0, 0), 0) == op1
5915 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5916 return op1;
5919 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5920 (const_int N))
5921 with (vec_concat (X) (B)) if N == 1 or
5922 (vec_concat (A) (X)) if N == 2. */
5923 if (GET_CODE (op0) == VEC_DUPLICATE
5924 && GET_CODE (op1) == CONST_VECTOR
5925 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
5926 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5927 && IN_RANGE (sel, 1, 2))
5929 rtx newop0 = XEXP (op0, 0);
5930 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5931 if (sel == 2)
5932 std::swap (newop0, newop1);
5933 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5935 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5936 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5937 Only applies for vectors of two elements. */
5938 if (GET_CODE (op0) == VEC_DUPLICATE
5939 && GET_CODE (op1) == VEC_CONCAT
5940 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5941 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5942 && IN_RANGE (sel, 1, 2))
5944 rtx newop0 = XEXP (op0, 0);
5945 rtx newop1 = XEXP (op1, 2 - sel);
5946 rtx otherop = XEXP (op1, sel - 1);
5947 if (sel == 2)
5948 std::swap (newop0, newop1);
5949 /* Don't want to throw away the other part of the vec_concat if
5950 it has side-effects. */
5951 if (!side_effects_p (otherop))
5952 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5955 /* Replace:
5957 (vec_merge:outer (vec_duplicate:outer x:inner)
5958 (subreg:outer y:inner 0)
5959 (const_int N))
5961 with (vec_concat:outer x:inner y:inner) if N == 1,
5962 or (vec_concat:outer y:inner x:inner) if N == 2.
5964 Implicitly, this means we have a paradoxical subreg, but such
5965 a check is cheap, so make it anyway.
5967 Only applies for vectors of two elements. */
5968 if (GET_CODE (op0) == VEC_DUPLICATE
5969 && GET_CODE (op1) == SUBREG
5970 && GET_MODE (op1) == GET_MODE (op0)
5971 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
5972 && paradoxical_subreg_p (op1)
5973 && subreg_lowpart_p (op1)
5974 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5975 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5976 && IN_RANGE (sel, 1, 2))
5978 rtx newop0 = XEXP (op0, 0);
5979 rtx newop1 = SUBREG_REG (op1);
5980 if (sel == 2)
5981 std::swap (newop0, newop1);
5982 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5985 /* Same as above but with switched operands:
5986 Replace (vec_merge:outer (subreg:outer x:inner 0)
5987 (vec_duplicate:outer y:inner)
5988 (const_int N))
5990 with (vec_concat:outer x:inner y:inner) if N == 1,
5991 or (vec_concat:outer y:inner x:inner) if N == 2. */
5992 if (GET_CODE (op1) == VEC_DUPLICATE
5993 && GET_CODE (op0) == SUBREG
5994 && GET_MODE (op0) == GET_MODE (op1)
5995 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
5996 && paradoxical_subreg_p (op0)
5997 && subreg_lowpart_p (op0)
5998 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5999 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6000 && IN_RANGE (sel, 1, 2))
6002 rtx newop0 = SUBREG_REG (op0);
6003 rtx newop1 = XEXP (op1, 0);
6004 if (sel == 2)
6005 std::swap (newop0, newop1);
6006 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6009 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6010 (const_int n))
6011 with (vec_concat x y) or (vec_concat y x) depending on value
6012 of N. */
6013 if (GET_CODE (op0) == VEC_DUPLICATE
6014 && GET_CODE (op1) == VEC_DUPLICATE
6015 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6016 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6017 && IN_RANGE (sel, 1, 2))
6019 rtx newop0 = XEXP (op0, 0);
6020 rtx newop1 = XEXP (op1, 0);
6021 if (sel == 2)
6022 std::swap (newop0, newop1);
6024 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6028 if (rtx_equal_p (op0, op1)
6029 && !side_effects_p (op2) && !side_effects_p (op1))
6030 return op0;
6032 if (!side_effects_p (op2))
6034 rtx top0 = simplify_merge_mask (op0, op2, 0);
6035 rtx top1 = simplify_merge_mask (op1, op2, 1);
6036 if (top0 || top1)
6037 return simplify_gen_ternary (code, mode, mode,
6038 top0 ? top0 : op0,
6039 top1 ? top1 : op1, op2);
6042 break;
6044 default:
6045 gcc_unreachable ();
6048 return 0;
6051 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
6052 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
6053 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
6055 Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
6056 represented as a little-endian array of 'unsigned char', selecting by BYTE,
6057 and then repacking them again for OUTERMODE. If OP is a CONST_VECTOR,
6058 FIRST_ELEM is the number of the first element to extract, otherwise
6059 FIRST_ELEM is ignored. */
6061 static rtx
6062 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
6063 machine_mode innermode, unsigned int byte,
6064 unsigned int first_elem, unsigned int inner_bytes)
6066 enum {
6067 value_bit = 8,
6068 value_mask = (1 << value_bit) - 1
6070 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
6071 int value_start;
6072 int i;
6073 int elem;
6075 int num_elem;
6076 rtx * elems;
6077 int elem_bitsize;
6078 rtx result_s = NULL;
6079 rtvec result_v = NULL;
6080 enum mode_class outer_class;
6081 scalar_mode outer_submode;
6082 int max_bitsize;
6084 /* Some ports misuse CCmode. */
6085 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
6086 return op;
6088 /* We have no way to represent a complex constant at the rtl level. */
6089 if (COMPLEX_MODE_P (outermode))
6090 return NULL_RTX;
6092 /* We support any size mode. */
6093 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
6094 inner_bytes * BITS_PER_UNIT);
6096 /* Unpack the value. */
6098 if (GET_CODE (op) == CONST_VECTOR)
6100 num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
6101 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
6103 else
6105 num_elem = 1;
6106 elem_bitsize = max_bitsize;
6108 /* If this asserts, it is too complicated; reducing value_bit may help. */
6109 gcc_assert (BITS_PER_UNIT % value_bit == 0);
6110 /* I don't know how to handle endianness of sub-units. */
6111 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6113 for (elem = 0; elem < num_elem; elem++)
6115 unsigned char * vp;
6116 rtx el = (GET_CODE (op) == CONST_VECTOR
6117 ? CONST_VECTOR_ELT (op, first_elem + elem)
6118 : op);
6120 /* Vectors are kept in target memory order. (This is probably
6121 a mistake.) */
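      /* Map element ELEM to a byte position in the little-endian VALUE
         array, honoring WORDS_BIG_ENDIAN at the word level and
         BYTES_BIG_ENDIAN within a word.  */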
6123 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6124 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6125 / BITS_PER_UNIT);
6126 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6127 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6128 unsigned bytele = (subword_byte % UNITS_PER_WORD
6129 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6130 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6133 switch (GET_CODE (el))
6135 case CONST_INT:
6136 for (i = 0;
6137 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6138 i += value_bit)
6139 *vp++ = INTVAL (el) >> i;
6140 /* CONST_INTs are always logically sign-extended. */
6141 for (; i < elem_bitsize; i += value_bit)
6142 *vp++ = INTVAL (el) < 0 ? -1 : 0;
6143 break;
6145 case CONST_WIDE_INT:
6147 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6148 unsigned char extend = wi::sign_mask (val);
6149 int prec = wi::get_precision (val);
6151 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6152 *vp++ = wi::extract_uhwi (val, i, value_bit);
6153 for (; i < elem_bitsize; i += value_bit)
6154 *vp++ = extend;
6156 break;
6158 case CONST_DOUBLE:
6159 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6161 unsigned char extend = 0;
6162 /* If this triggers, someone should have generated a
6163 CONST_INT instead. */
6164 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6166 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6167 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6168 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6170 *vp++
6171 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6172 i += value_bit;
6175 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6176 extend = -1;
6177 for (; i < elem_bitsize; i += value_bit)
6178 *vp++ = extend;
6180 else
6182 /* This is big enough for anything on the platform. */
6183 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6184 scalar_float_mode el_mode;
6186 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6187 int bitsize = GET_MODE_BITSIZE (el_mode);
6189 gcc_assert (bitsize <= elem_bitsize);
6190 gcc_assert (bitsize % value_bit == 0);
6192 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6193 GET_MODE (el));
6195 /* real_to_target produces its result in words affected by
6196 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6197 and use WORDS_BIG_ENDIAN instead; see the documentation
6198 of SUBREG in rtl.texi. */
6199 for (i = 0; i < bitsize; i += value_bit)
6201 int ibase;
6202 if (WORDS_BIG_ENDIAN)
6203 ibase = bitsize - 1 - i;
6204 else
6205 ibase = i;
6206 *vp++ = tmp[ibase / 32] >> i % 32;
6209 /* It shouldn't matter what's done here, so fill it with
6210 zero. */
6211 for (; i < elem_bitsize; i += value_bit)
6212 *vp++ = 0;
6214 break;
6216 case CONST_FIXED:
6217 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6219 for (i = 0; i < elem_bitsize; i += value_bit)
6220 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6222 else
6224 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6225 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6226 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6227 i += value_bit)
6228 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6229 >> (i - HOST_BITS_PER_WIDE_INT);
6230 for (; i < elem_bitsize; i += value_bit)
6231 *vp++ = 0;
6233 break;
6235 default:
6236 gcc_unreachable ();
6240 /* Now, pick the right byte to start with. */
6241 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6242 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6243 will already have offset 0. */
6244 if (inner_bytes >= GET_MODE_SIZE (outermode))
6246 unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
6247 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6248 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6249 byte = (subword_byte % UNITS_PER_WORD
6250 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6253 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6254 so if it's become negative it will instead be very large.) */
6255 gcc_assert (byte < inner_bytes);
6257 /* Convert from bytes to chunks of size value_bit. */
6258 value_start = byte * (BITS_PER_UNIT / value_bit);
6260 /* Re-pack the value. */
6261 num_elem = GET_MODE_NUNITS (outermode);
6263 if (VECTOR_MODE_P (outermode))
6265 result_v = rtvec_alloc (num_elem);
6266 elems = &RTVEC_ELT (result_v, 0);
6268 else
6269 elems = &result_s;
6271 outer_submode = GET_MODE_INNER (outermode);
6272 outer_class = GET_MODE_CLASS (outer_submode);
6273 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6275 gcc_assert (elem_bitsize % value_bit == 0);
6276 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6278 for (elem = 0; elem < num_elem; elem++)
6280 unsigned char *vp;
6282 /* Vectors are stored in target memory order. (This is probably
6283 a mistake.) */
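      /* As in the unpacking loop above, map element ELEM to its byte
         position in VALUE according to the target's word and byte order.  */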
6285 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6286 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6287 / BITS_PER_UNIT);
6288 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6289 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6290 unsigned bytele = (subword_byte % UNITS_PER_WORD
6291 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6292 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6295 switch (outer_class)
6297 case MODE_INT:
6298 case MODE_PARTIAL_INT:
6300 int u;
6301 int base = 0;
6302 int units
6303 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6304 / HOST_BITS_PER_WIDE_INT;
6305 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6306 wide_int r;
6308 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6309 return NULL_RTX;
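        /* Assemble the 8-bit value chunks into HOST_WIDE_INT-sized words,
           least significant chunk first, then build a wide_int from the
           resulting array.  */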
6310 for (u = 0; u < units; u++)
6312 unsigned HOST_WIDE_INT buf = 0;
6313 for (i = 0;
6314 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6315 i += value_bit)
6316 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6318 tmp[u] = buf;
6319 base += HOST_BITS_PER_WIDE_INT;
6321 r = wide_int::from_array (tmp, units,
6322 GET_MODE_PRECISION (outer_submode));
6323 #if TARGET_SUPPORTS_WIDE_INT == 0
6324 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6325 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6326 return NULL_RTX;
6327 #endif
6328 elems[elem] = immed_wide_int_const (r, outer_submode);
6330 break;
6332 case MODE_FLOAT:
6333 case MODE_DECIMAL_FLOAT:
6335 REAL_VALUE_TYPE r;
6336 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6338 /* real_from_target wants its input in words affected by
6339 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6340 and use WORDS_BIG_ENDIAN instead; see the documentation
6341 of SUBREG in rtl.texi. */
6342 for (i = 0; i < elem_bitsize; i += value_bit)
6344 int ibase;
6345 if (WORDS_BIG_ENDIAN)
6346 ibase = elem_bitsize - 1 - i;
6347 else
6348 ibase = i;
6349 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6352 real_from_target (&r, tmp, outer_submode);
6353 elems[elem] = const_double_from_real_value (r, outer_submode);
6355 break;
6357 case MODE_FRACT:
6358 case MODE_UFRACT:
6359 case MODE_ACCUM:
6360 case MODE_UACCUM:
6362 FIXED_VALUE_TYPE f;
6363 f.data.low = 0;
6364 f.data.high = 0;
6365 f.mode = outer_submode;
6367 for (i = 0;
6368 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6369 i += value_bit)
6370 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6371 for (; i < elem_bitsize; i += value_bit)
6372 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6373 << (i - HOST_BITS_PER_WIDE_INT));
6375 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6377 break;
6379 default:
6380 gcc_unreachable ();
6383 if (VECTOR_MODE_P (outermode))
6384 return gen_rtx_CONST_VECTOR (outermode, result_v);
6385 else
6386 return result_s;
6389 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6390 Return 0 if no simplifications are possible. */
6391 rtx
6392 simplify_subreg (machine_mode outermode, rtx op,
6393 machine_mode innermode, poly_uint64 byte)
6395 /* Little bit of sanity checking. */
6396 gcc_assert (innermode != VOIDmode);
6397 gcc_assert (outermode != VOIDmode);
6398 gcc_assert (innermode != BLKmode);
6399 gcc_assert (outermode != BLKmode);
6401 gcc_assert (GET_MODE (op) == innermode
6402 || GET_MODE (op) == VOIDmode);
6404 poly_uint64 outersize = GET_MODE_SIZE (outermode);
6405 if (!multiple_p (byte, outersize))
6406 return NULL_RTX;
6408 poly_uint64 innersize = GET_MODE_SIZE (innermode);
6409 if (maybe_ge (byte, innersize))
6410 return NULL_RTX;
6412 if (outermode == innermode && known_eq (byte, 0U))
6413 return op;
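  /* If OP duplicates a single scalar and BYTE is an element boundary, the
     SUBREG is either a narrower duplicate of that scalar or the scalar
     itself.  */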
6415 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6417 rtx elt;
6419 if (VECTOR_MODE_P (outermode)
6420 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6421 && vec_duplicate_p (op, &elt))
6422 return gen_vec_duplicate (outermode, elt);
6424 if (outermode == GET_MODE_INNER (innermode)
6425 && vec_duplicate_p (op, &elt))
6426 return elt;
6429 if (CONST_SCALAR_INT_P (op)
6430 || CONST_DOUBLE_AS_FLOAT_P (op)
6431 || CONST_FIXED_P (op)
6432 || GET_CODE (op) == CONST_VECTOR)
6434 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6435 the result from bytes, so it only works if the sizes of the modes
6436 and the value of the offset are known at compile time. Cases that
6437 apply to general modes and offsets should be handled here
6438 before calling simplify_immed_subreg. */
6439 fixed_size_mode fs_outermode, fs_innermode;
6440 unsigned HOST_WIDE_INT cbyte;
6441 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6442 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6443 && byte.is_constant (&cbyte))
6444 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
6445 0, GET_MODE_SIZE (fs_innermode));
6447 /* Handle constant-sized outer modes and variable-sized inner modes. */
6448 unsigned HOST_WIDE_INT first_elem;
6449 if (GET_CODE (op) == CONST_VECTOR
6450 && is_a <fixed_size_mode> (outermode, &fs_outermode)
6451 && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
6452 &first_elem))
6453 return simplify_immed_subreg (fs_outermode, op, innermode, 0,
6454 first_elem,
6455 GET_MODE_SIZE (fs_outermode));
6457 return NULL_RTX;
6460 /* Changing mode twice with SUBREG => just change it once,
6461 or not at all if changing back to op's starting mode. */
6462 if (GET_CODE (op) == SUBREG)
6464 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6465 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6466 rtx newx;
6468 if (outermode == innermostmode
6469 && known_eq (byte, 0U)
6470 && known_eq (SUBREG_BYTE (op), 0))
6471 return SUBREG_REG (op);
6473 /* Work out the memory offset of the final OUTERMODE value relative
6474 to the inner value of OP. */
6475 poly_int64 mem_offset = subreg_memory_offset (outermode,
6476 innermode, byte);
6477 poly_int64 op_mem_offset = subreg_memory_offset (op);
6478 poly_int64 final_offset = mem_offset + op_mem_offset;
6480 /* See whether resulting subreg will be paradoxical. */
6481 if (!paradoxical_subreg_p (outermode, innermostmode))
6483 /* Bail out in case resulting subreg would be incorrect. */
6484 if (maybe_lt (final_offset, 0)
6485 || maybe_ge (poly_uint64 (final_offset), innermostsize)
6486 || !multiple_p (final_offset, outersize))
6487 return NULL_RTX;
6489 else
6491 poly_int64 required_offset = subreg_memory_offset (outermode,
6492 innermostmode, 0);
6493 if (maybe_ne (final_offset, required_offset))
6494 return NULL_RTX;
6495 /* Paradoxical subregs always have byte offset 0. */
6496 final_offset = 0;
6499 /* Recurse for further possible simplifications. */
6500 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6501 final_offset);
6502 if (newx)
6503 return newx;
6504 if (validate_subreg (outermode, innermostmode,
6505 SUBREG_REG (op), final_offset))
6507 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
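          /* Preserve the promotion information if NEWX is still a lowpart
             of the promoted inner value and no wider than the innermost
             mode.  */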
6508 if (SUBREG_PROMOTED_VAR_P (op)
6509 && SUBREG_PROMOTED_SIGN (op) >= 0
6510 && GET_MODE_CLASS (outermode) == MODE_INT
6511 && known_ge (outersize, innersize)
6512 && known_le (outersize, innermostsize)
6513 && subreg_lowpart_p (newx))
6515 SUBREG_PROMOTED_VAR_P (newx) = 1;
6516 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6518 return newx;
6520 return NULL_RTX;
6523 /* SUBREG of a hard register => just change the register number
6524 and/or mode. If the hard register is not valid in that mode,
6525 suppress this simplification. If the hard register is the stack,
6526 frame, or argument pointer, leave this as a SUBREG. */
6528 if (REG_P (op) && HARD_REGISTER_P (op))
6530 unsigned int regno, final_regno;
6532 regno = REGNO (op);
6533 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6534 if (HARD_REGISTER_NUM_P (final_regno))
6536 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6537 subreg_memory_offset (outermode,
6538 innermode, byte));
6540 /* Propagate original regno. We don't have any way to specify
6541 the offset inside original regno, so do so only for lowpart.
6542 The information is used only by alias analysis, which cannot
6543 grok partial registers anyway. */
6545 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6546 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6547 return x;
6551 /* If we have a SUBREG of a register that we are replacing and we are
6552 replacing it with a MEM, make a new MEM and try replacing the
6553 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6554 or if we would be widening it. */
6556 if (MEM_P (op)
6557 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6558 /* Allow splitting of volatile memory references in case we don't
6559 have instruction to move the whole thing. */
6560 && (! MEM_VOLATILE_P (op)
6561 || ! have_insn_for (SET, innermode))
6562 && known_le (outersize, innersize))
6563 return adjust_address_nv (op, outermode, byte);
6565 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6566 of two parts. */
6567 if (GET_CODE (op) == CONCAT
6568 || GET_CODE (op) == VEC_CONCAT)
6570 poly_uint64 final_offset;
6571 rtx part, res;
6573 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6574 if (part_mode == VOIDmode)
6575 part_mode = GET_MODE_INNER (GET_MODE (op));
6576 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6577 if (known_lt (byte, part_size))
6579 part = XEXP (op, 0);
6580 final_offset = byte;
6582 else if (known_ge (byte, part_size))
6584 part = XEXP (op, 1);
6585 final_offset = byte - part_size;
6587 else
6588 return NULL_RTX;
6590 if (maybe_gt (final_offset + outersize, part_size))
6591 return NULL_RTX;
6593 part_mode = GET_MODE (part);
6594 if (part_mode == VOIDmode)
6595 part_mode = GET_MODE_INNER (GET_MODE (op));
6596 res = simplify_subreg (outermode, part, part_mode, final_offset);
6597 if (res)
6598 return res;
6599 if (validate_subreg (outermode, part_mode, part, final_offset))
6600 return gen_rtx_SUBREG (outermode, part, final_offset);
6601 return NULL_RTX;
6604 /* Simplify
6605 (subreg (vec_merge (X)
6606 (vector)
6607 (const_int ((1 << N) | M)))
6608 (N * sizeof (outermode)))
6610 (subreg (X) (N * sizeof (outermode)))
6612 unsigned int idx;
6613 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
6614 && idx < HOST_BITS_PER_WIDE_INT
6615 && GET_CODE (op) == VEC_MERGE
6616 && GET_MODE_INNER (innermode) == outermode
6617 && CONST_INT_P (XEXP (op, 2))
6618 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
6619 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
6621 /* A SUBREG resulting from a zero extension may fold to zero if
6622 it extracts higher bits than the ZERO_EXTEND's source bits. */
6623 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6625 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6626 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6627 return CONST0_RTX (outermode);
6630 scalar_int_mode int_outermode, int_innermode;
6631 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6632 && is_a <scalar_int_mode> (innermode, &int_innermode)
6633 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6635 /* Handle polynomial integers. The upper bits of a paradoxical
6636 subreg are undefined, so this is safe regardless of whether
6637 we're truncating or extending. */
6638 if (CONST_POLY_INT_P (op))
6640 poly_wide_int val
6641 = poly_wide_int::from (const_poly_int_value (op),
6642 GET_MODE_PRECISION (int_outermode),
6643 SIGNED);
6644 return immed_wide_int_const (val, int_outermode);
6647 if (GET_MODE_PRECISION (int_outermode)
6648 < GET_MODE_PRECISION (int_innermode))
6650 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6651 if (tem)
6652 return tem;
6656 return NULL_RTX;
6659 /* Make a SUBREG operation or equivalent if it folds. */
6661 rtx
6662 simplify_gen_subreg (machine_mode outermode, rtx op,
6663 machine_mode innermode, poly_uint64 byte)
6665 rtx newx;
6667 newx = simplify_subreg (outermode, op, innermode, byte);
6668 if (newx)
6669 return newx;
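  /* Give up rather than wrap a SUBREG, a CONCAT or a VOIDmode value in
     another SUBREG.  */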
6671 if (GET_CODE (op) == SUBREG
6672 || GET_CODE (op) == CONCAT
6673 || GET_MODE (op) == VOIDmode)
6674 return NULL_RTX;
6676 if (validate_subreg (outermode, innermode, op, byte))
6677 return gen_rtx_SUBREG (outermode, op, byte);
6679 return NULL_RTX;
6682 /* Generate a subreg to get the least significant part of EXPR (in mode
6683 INNER_MODE) to OUTER_MODE. */
6685 rtx
6686 lowpart_subreg (machine_mode outer_mode, rtx expr,
6687 machine_mode inner_mode)
6689 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6690 subreg_lowpart_offset (outer_mode, inner_mode));
6693 /* Simplify X, an rtx expression.
6695 Return the simplified expression or NULL if no simplifications
6696 were possible.
6698 This is the preferred entry point into the simplification routines;
6699 however, we still allow passes to call the more specific routines.
6701 Right now GCC has three (yes, three) major bodies of RTL simplification
6702 code that need to be unified.
6704 1. fold_rtx in cse.c. This code uses various CSE specific
6705 information to aid in RTL simplification.
6707 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6708 it uses combine specific information to aid in RTL
6709 simplification.
6711 3. The routines in this file.
6714 Long term we want to only have one body of simplification code; to
6715 get to that state I recommend the following steps:
6717 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6718 which are not pass dependent state into these routines.
6720 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6721 use this routine whenever possible.
6723 3. Allow for pass dependent state to be provided to these
6724 routines and add simplifications based on the pass dependent
6725 state. Remove code from cse.c & combine.c that becomes
6726 redundant/dead.
6728 It will take time, but ultimately the compiler will be easier to
6729 maintain and improve. It's totally silly that when we add a
6730 simplification it needs to be added to 4 places (3 for RTL
6731 simplification and 1 for tree simplification). */
6733 rtx
6734 simplify_rtx (const_rtx x)
6736 const enum rtx_code code = GET_CODE (x);
6737 const machine_mode mode = GET_MODE (x);
6739 switch (GET_RTX_CLASS (code))
6741 case RTX_UNARY:
6742 return simplify_unary_operation (code, mode,
6743 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6744 case RTX_COMM_ARITH:
6745 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6746 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6748 /* Fall through. */
6750 case RTX_BIN_ARITH:
6751 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6753 case RTX_TERNARY:
6754 case RTX_BITFIELD_OPS:
6755 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6756 XEXP (x, 0), XEXP (x, 1),
6757 XEXP (x, 2));
6759 case RTX_COMPARE:
6760 case RTX_COMM_COMPARE:
6761 return simplify_relational_operation (code, mode,
6762 ((GET_MODE (XEXP (x, 0))
6763 != VOIDmode)
6764 ? GET_MODE (XEXP (x, 0))
6765 : GET_MODE (XEXP (x, 1))),
6766 XEXP (x, 0),
6767 XEXP (x, 1));
6769 case RTX_EXTRA:
6770 if (code == SUBREG)
6771 return simplify_subreg (mode, SUBREG_REG (x),
6772 GET_MODE (SUBREG_REG (x)),
6773 SUBREG_BYTE (x));
6774 break;
6776 case RTX_OBJ:
6777 if (code == LO_SUM)
6779 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6780 if (GET_CODE (XEXP (x, 0)) == HIGH
6781 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6782 return XEXP (x, 1);
6784 break;
6786 default:
6787 break;
6789 return NULL;
6792 #if CHECKING_P
6794 namespace selftest {
6796 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6798 static rtx
6799 make_test_reg (machine_mode mode)
6801 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6803 return gen_rtx_REG (mode, test_reg_num++);
6806 /* Test vector simplifications involving VEC_DUPLICATE in which the
6807 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6808 register that holds one element of MODE. */
6810 static void
6811 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6813 scalar_mode inner_mode = GET_MODE_INNER (mode);
6814 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6815 poly_uint64 nunits = GET_MODE_NUNITS (mode);
6816 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6818 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6819 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6820 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6821 ASSERT_RTX_EQ (duplicate,
6822 simplify_unary_operation (NOT, mode,
6823 duplicate_not, mode));
6825 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6826 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6827 ASSERT_RTX_EQ (duplicate,
6828 simplify_unary_operation (NEG, mode,
6829 duplicate_neg, mode));
6831 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6832 ASSERT_RTX_EQ (duplicate,
6833 simplify_binary_operation (PLUS, mode, duplicate,
6834 CONST0_RTX (mode)));
6836 ASSERT_RTX_EQ (duplicate,
6837 simplify_binary_operation (MINUS, mode, duplicate,
6838 CONST0_RTX (mode)));
6840 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6841 simplify_binary_operation (MINUS, mode, duplicate,
6842 duplicate));
6845 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6846 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6847 ASSERT_RTX_PTR_EQ (scalar_reg,
6848 simplify_binary_operation (VEC_SELECT, inner_mode,
6849 duplicate, zero_par));
6851 unsigned HOST_WIDE_INT const_nunits;
6852 if (nunits.is_constant (&const_nunits))
6854 /* And again with the final element. */
6855 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
6856 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6857 ASSERT_RTX_PTR_EQ (scalar_reg,
6858 simplify_binary_operation (VEC_SELECT, inner_mode,
6859 duplicate, last_par));
6861 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
6862 rtx vector_reg = make_test_reg (mode);
6863 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
6865 if (i >= HOST_BITS_PER_WIDE_INT)
6866 break;
6867 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
6868 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
6869 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
6870 ASSERT_RTX_EQ (scalar_reg,
6871 simplify_gen_subreg (inner_mode, vm,
6872 mode, offset));
6876 /* Test a scalar subreg of a VEC_DUPLICATE. */
6877 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6878 ASSERT_RTX_EQ (scalar_reg,
6879 simplify_gen_subreg (inner_mode, duplicate,
6880 mode, offset));
6882 machine_mode narrower_mode;
6883 if (maybe_ne (nunits, 2U)
6884 && multiple_p (nunits, 2)
6885 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6886 && VECTOR_MODE_P (narrower_mode))
6888 /* Test VEC_SELECT of a vector. */
6889 rtx vec_par
6890 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6891 rtx narrower_duplicate
6892 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6893 ASSERT_RTX_EQ (narrower_duplicate,
6894 simplify_binary_operation (VEC_SELECT, narrower_mode,
6895 duplicate, vec_par));
6897 /* Test a vector subreg of a VEC_DUPLICATE. */
6898 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6899 ASSERT_RTX_EQ (narrower_duplicate,
6900 simplify_gen_subreg (narrower_mode, duplicate,
6901 mode, offset));
6905 /* Test vector simplifications involving VEC_SERIES in which the
6906 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6907 register that holds one element of MODE. */
6909 static void
6910 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6912 /* Test unary cases with VEC_SERIES arguments. */
6913 scalar_mode inner_mode = GET_MODE_INNER (mode);
6914 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6915 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6916 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6917 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6918 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6919 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6920 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6921 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6922 neg_scalar_reg);
6923 ASSERT_RTX_EQ (series_0_r,
6924 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6925 ASSERT_RTX_EQ (series_r_m1,
6926 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6927 ASSERT_RTX_EQ (series_r_r,
6928 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6930 /* Test that a VEC_SERIES with a zero step is simplified away. */
6931 ASSERT_RTX_EQ (duplicate,
6932 simplify_binary_operation (VEC_SERIES, mode,
6933 scalar_reg, const0_rtx));
6935 /* Test PLUS and MINUS with VEC_SERIES. */
6936 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6937 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6938 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6939 ASSERT_RTX_EQ (series_r_r,
6940 simplify_binary_operation (PLUS, mode, series_0_r,
6941 duplicate));
6942 ASSERT_RTX_EQ (series_r_1,
6943 simplify_binary_operation (PLUS, mode, duplicate,
6944 series_0_1));
6945 ASSERT_RTX_EQ (series_r_m1,
6946 simplify_binary_operation (PLUS, mode, duplicate,
6947 series_0_m1));
6948 ASSERT_RTX_EQ (series_0_r,
6949 simplify_binary_operation (MINUS, mode, series_r_r,
6950 duplicate));
6951 ASSERT_RTX_EQ (series_r_m1,
6952 simplify_binary_operation (MINUS, mode, duplicate,
6953 series_0_1));
6954 ASSERT_RTX_EQ (series_r_1,
6955 simplify_binary_operation (MINUS, mode, duplicate,
6956 series_0_m1));
6957 ASSERT_RTX_EQ (series_0_m1,
6958 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6959 constm1_rtx));
6962 /* Verify simplify_merge_mask works correctly. */
6964 static void
6965 test_vec_merge (machine_mode mode)
6967 rtx op0 = make_test_reg (mode);
6968 rtx op1 = make_test_reg (mode);
6969 rtx op2 = make_test_reg (mode);
6970 rtx op3 = make_test_reg (mode);
6971 rtx op4 = make_test_reg (mode);
6972 rtx op5 = make_test_reg (mode);
6973 rtx mask1 = make_test_reg (SImode);
6974 rtx mask2 = make_test_reg (SImode);
6975 rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
6976 rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
6977 rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
6979 /* Simple vec_merge. */
6980 ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
6981 ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
6982 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
6983 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
6985 /* Nested vec_merge.
6986 It's tempting to make this simplify right down to opN, but we don't
6987 because all the simplify_* functions assume that the operands have
6988 already been simplified. */
6989 rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
6990 ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
6991 ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
6993 /* Intermediate unary op. */
6994 rtx unop = gen_rtx_NOT (mode, vm1);
6995 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
6996 simplify_merge_mask (unop, mask1, 0));
6997 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
6998 simplify_merge_mask (unop, mask1, 1));
7000 /* Intermediate binary op. */
7001 rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
7002 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
7003 simplify_merge_mask (binop, mask1, 0));
7004 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
7005 simplify_merge_mask (binop, mask1, 1));
7007 /* Intermediate ternary op. */
7008 rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
7009 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
7010 simplify_merge_mask (tenop, mask1, 0));
7011 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
7012 simplify_merge_mask (tenop, mask1, 1));
7014 /* Side effects. */
7015 rtx badop0 = gen_rtx_PRE_INC (mode, op0);
7016 rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
7017 ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
7018 ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
7020 /* Called indirectly. */
7021 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
7022 simplify_rtx (nvm));
7025 /* Verify some simplifications involving vectors. */
7027 static void
7028 test_vector_ops ()
7030 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7032 machine_mode mode = (machine_mode) i;
7033 if (VECTOR_MODE_P (mode))
7035 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
7036 test_vector_ops_duplicate (mode, scalar_reg);
7037 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
7038 && maybe_gt (GET_MODE_NUNITS (mode), 2))
7039 test_vector_ops_series (mode, scalar_reg);
7040 test_vec_merge (mode);
7045 template<unsigned int N>
7046 struct simplify_const_poly_int_tests
7048 static void run ();
7051 template<>
7052 struct simplify_const_poly_int_tests<1>
7054 static void run () {}
7057 /* Test various CONST_POLY_INT properties. */
7059 template<unsigned int N>
7060 void
7061 simplify_const_poly_int_tests<N>::run ()
7063 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
7064 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
7065 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
7066 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
7067 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
7068 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
7069 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
7070 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
7071 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
7072 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
7073 rtx two = GEN_INT (2);
7074 rtx six = GEN_INT (6);
7075 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
7077 /* These tests only try limited operation combinations. Fuller arithmetic
7078 testing is done directly on poly_ints. */
7079 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
7080 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
7081 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
7082 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
7083 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
7084 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
7085 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
7086 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
7087 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
7088 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
7089 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
7092 /* Run all of the selftests within this file. */
7094 void
7095 simplify_rtx_c_tests ()
7097 test_vector_ops ();
7098 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
7101 } // namespace selftest
7103 #endif /* CHECKING_P */