1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
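/* For instance, with the usual 64-bit HOST_WIDE_INT:

     HWI_SIGN_EXTEND (HOST_WIDE_INT_1U << 63)  == HOST_WIDE_INT_M1
     HWI_SIGN_EXTEND (1)                       == HOST_WIDE_INT_0

   i.e. exactly the high half that a signed widening of LOW would
   produce.  */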
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
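/* For example, neg_const_int (SImode, GEN_INT (5)) yields (const_int -5)
   via gen_int_mode; the wide-int fallback above is only needed when the
   mode is wider than a HOST_WIDE_INT and negation would wrap the value
   back onto itself (zero or the minimum value).  */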
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
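/* As an example, on a target where SImode has 32-bit precision,
   mode_signbit_p (SImode, GEN_INT (-2147483648)) is true: after masking
   with the mode mask the value is 0x80000000, which is
   HOST_WIDE_INT_1U << 31.  */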
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
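/* For illustration:

     simplify_gen_binary (PLUS, SImode, x, const0_rtx)

   returns X itself rather than building (plus:SI X (const_int 0)),
   because simplify_binary_operation folds the addition of zero; only
   when no folding applies is a fresh rtx generated, with any constant
   operand placed second.  */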
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
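/* For example, if X is (mem:DF (symbol_ref ...)) whose address refers to
   a constant-pool entry holding a DFmode CONST_DOUBLE, that CONST_DOUBLE
   is returned directly; a nonzero offset or a mode mismatch is instead
   handled through simplify_subreg, and X is returned unchanged if even
   that fails.  */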
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 poly_int64 offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
319 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
320 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
321 decl = NULL;
322 else
323 offset += bytepos + toffset_val;
324 break;
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
336 rtx newx;
338 offset += MEM_OFFSET (x);
340 newx = DECL_RTL (decl);
342 if (MEM_P (newx))
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345 poly_int64 n_offset, o_offset;
347 /* Avoid creating a new MEM needlessly if we already had
348 the same address. We do if there's no OFFSET and the
349 old address X is identical to NEWX, or if X is of the
350 form (plus NEWX OFFSET), or the NEWX is of the form
351 (plus Y (const_int Z)) and X is that with the offset
352 added: (plus Y (const_int Z+OFFSET)). */
353 n = strip_offset (n, &n_offset);
354 o = strip_offset (o, &o_offset);
355 if (!(known_eq (o_offset, n_offset + offset)
356 && rtx_equal_p (o, n)))
357 x = adjust_address_nv (newx, mode, offset);
359 else if (GET_MODE (x) == GET_MODE (newx)
360 && known_eq (offset, 0))
361 x = newx;
365 return x;
368 /* Make a unary operation by first seeing if it folds and otherwise making
369 the specified operation. */
372 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
373 machine_mode op_mode)
375 rtx tem;
377 /* If this simplifies, use it. */
378 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
379 return tem;
381 return gen_rtx_fmt_e (code, mode, op);
384 /* Likewise for ternary operations. */
387 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
388 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
390 rtx tem;
392 /* If this simplifies, use it. */
393 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
394 op0, op1, op2)) != 0)
395 return tem;
397 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
400 /* Likewise, for relational operations.
401 CMP_MODE specifies mode comparison is done in. */
404 simplify_gen_relational (enum rtx_code code, machine_mode mode,
405 machine_mode cmp_mode, rtx op0, rtx op1)
407 rtx tem;
409 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
410 op0, op1)) != 0)
411 return tem;
413 return gen_rtx_fmt_ee (code, mode, op0, op1);
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417 and simplify the result. If FN is non-NULL, call this callback on each
418 X; if it returns non-NULL, replace X with its return value and simplify the
419 result. */
422 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
423 rtx (*fn) (rtx, const_rtx, void *), void *data)
425 enum rtx_code code = GET_CODE (x);
426 machine_mode mode = GET_MODE (x);
427 machine_mode op_mode;
428 const char *fmt;
429 rtx op0, op1, op2, newx, op;
430 rtvec vec, newvec;
431 int i, j;
433 if (__builtin_expect (fn != NULL, 0))
435 newx = fn (x, old_rtx, data);
436 if (newx)
437 return newx;
439 else if (rtx_equal_p (x, old_rtx))
440 return copy_rtx ((rtx) data);
442 switch (GET_RTX_CLASS (code))
444 case RTX_UNARY:
445 op0 = XEXP (x, 0);
446 op_mode = GET_MODE (op0);
447 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
448 if (op0 == XEXP (x, 0))
449 return x;
450 return simplify_gen_unary (code, mode, op0, op_mode);
452 case RTX_BIN_ARITH:
453 case RTX_COMM_ARITH:
454 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
455 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
456 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
457 return x;
458 return simplify_gen_binary (code, mode, op0, op1);
460 case RTX_COMPARE:
461 case RTX_COMM_COMPARE:
462 op0 = XEXP (x, 0);
463 op1 = XEXP (x, 1);
464 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
465 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
466 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
467 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
468 return x;
469 return simplify_gen_relational (code, mode, op_mode, op0, op1);
471 case RTX_TERNARY:
472 case RTX_BITFIELD_OPS:
473 op0 = XEXP (x, 0);
474 op_mode = GET_MODE (op0);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
477 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
478 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
479 return x;
480 if (op_mode == VOIDmode)
481 op_mode = GET_MODE (op0);
482 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
484 case RTX_EXTRA:
485 if (code == SUBREG)
487 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
488 if (op0 == SUBREG_REG (x))
489 return x;
490 op0 = simplify_gen_subreg (GET_MODE (x), op0,
491 GET_MODE (SUBREG_REG (x)),
492 SUBREG_BYTE (x));
493 return op0 ? op0 : x;
495 break;
497 case RTX_OBJ:
498 if (code == MEM)
500 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 if (op0 == XEXP (x, 0))
502 return x;
503 return replace_equiv_address_nv (x, op0);
505 else if (code == LO_SUM)
507 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
508 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
510 /* (lo_sum (high x) y) -> y where x and y have the same base. */
511 if (GET_CODE (op0) == HIGH)
513 rtx base0, base1, offset0, offset1;
514 split_const (XEXP (op0, 0), &base0, &offset0);
515 split_const (op1, &base1, &offset1);
516 if (rtx_equal_p (base0, base1))
517 return op1;
520 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
521 return x;
522 return gen_rtx_LO_SUM (mode, op0, op1);
524 break;
526 default:
527 break;
530 newx = x;
531 fmt = GET_RTX_FORMAT (code);
532 for (i = 0; fmt[i]; i++)
533 switch (fmt[i])
535 case 'E':
536 vec = XVEC (x, i);
537 newvec = XVEC (newx, i);
538 for (j = 0; j < GET_NUM_ELEM (vec); j++)
540 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
541 old_rtx, fn, data);
542 if (op != RTVEC_ELT (vec, j))
544 if (newvec == vec)
546 newvec = shallow_copy_rtvec (vec);
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XVEC (newx, i) = newvec;
551 RTVEC_ELT (newvec, j) = op;
554 break;
556 case 'e':
557 if (XEXP (x, i))
559 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
560 if (op != XEXP (x, i))
562 if (x == newx)
563 newx = shallow_copy_rtx (x);
564 XEXP (newx, i) = op;
567 break;
569 return newx;
572 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
573 resulting RTX. Return a new RTX which is as simplified as possible. */
576 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
578 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
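/* A small example: calling simplify_replace_rtx on
   (plus:SI (reg:SI 100) (const_int 4)) with OLD_RTX = (reg:SI 100) and
   NEW_RTX = (const_int 8) does not merely substitute; the rebuilt PLUS
   is folded, so the result is (const_int 12).  */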
581 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
582 Only handle cases where the truncated value is inherently an rvalue.
584 RTL provides two ways of truncating a value:
586 1. a lowpart subreg. This form is only a truncation when both
587 the outer and inner modes (here MODE and OP_MODE respectively)
588 are scalar integers, and only then when the subreg is used as
589 an rvalue.
591 It is only valid to form such truncating subregs if the
592 truncation requires no action by the target. The onus for
593 proving this is on the creator of the subreg -- e.g. the
594 caller to simplify_subreg or simplify_gen_subreg -- and typically
595 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
597 2. a TRUNCATE. This form handles both scalar and compound integers.
599 The first form is preferred where valid. However, the TRUNCATE
600 handling in simplify_unary_operation turns the second form into the
601 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
602 so it is generally safe to form rvalue truncations using:
604 simplify_gen_unary (TRUNCATE, ...)
606 and leave simplify_unary_operation to work out which representation
607 should be used.
609 Because of the proof requirements on (1), simplify_truncation must
610 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
611 regardless of whether the outer truncation came from a SUBREG or a
612 TRUNCATE. For example, if the caller has proven that an SImode
613 truncation of:
615 (and:DI X Y)
617 is a no-op and can be represented as a subreg, it does not follow
618 that SImode truncations of X and Y are also no-ops. On a target
619 like 64-bit MIPS that requires SImode values to be stored in
620 sign-extended form, an SImode truncation of:
622 (and:DI (reg:DI X) (const_int 63))
624 is trivially a no-op because only the lower 6 bits can be set.
625 However, X is still an arbitrary 64-bit number and so we cannot
626 assume that truncating it too is a no-op. */
628 static rtx
629 simplify_truncation (machine_mode mode, rtx op,
630 machine_mode op_mode)
632 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
633 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
634 scalar_int_mode int_mode, int_op_mode, subreg_mode;
636 gcc_assert (precision <= op_precision);
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op) == ZERO_EXTEND
640 || GET_CODE (op) == SIGN_EXTEND)
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the subreg.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if the outermode
646 is larger than the origmode, we can just extend to the appropriate
647 mode. */
648 machine_mode origmode = GET_MODE (XEXP (op, 0));
649 if (mode == origmode)
650 return XEXP (op, 0);
651 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
652 return simplify_gen_unary (TRUNCATE, mode,
653 XEXP (op, 0), origmode);
654 else
655 return simplify_gen_unary (GET_CODE (op), mode,
656 XEXP (op, 0), origmode);
659 /* If the machine can perform operations in the truncated mode, distribute
660 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
661 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
662 if (1
663 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
664 && (GET_CODE (op) == PLUS
665 || GET_CODE (op) == MINUS
666 || GET_CODE (op) == MULT))
668 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
669 if (op0)
671 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
672 if (op1)
673 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
677 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
678 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
679 the outer subreg is effectively a truncation to the original mode. */
680 if ((GET_CODE (op) == LSHIFTRT
681 || GET_CODE (op) == ASHIFTRT)
682 /* Ensure that OP_MODE is at least twice as wide as MODE
683 to avoid the possibility that an outer LSHIFTRT shifts by more
684 than the sign extension's sign_bit_copies and introduces zeros
685 into the high bits of the result. */
686 && 2 * precision <= op_precision
687 && CONST_INT_P (XEXP (op, 1))
688 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
689 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
690 && UINTVAL (XEXP (op, 1)) < precision)
691 return simplify_gen_binary (ASHIFTRT, mode,
692 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
694 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
695 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
696 the outer subreg is effectively a truncation to the original mode. */
697 if ((GET_CODE (op) == LSHIFTRT
698 || GET_CODE (op) == ASHIFTRT)
699 && CONST_INT_P (XEXP (op, 1))
700 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
701 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
702 && UINTVAL (XEXP (op, 1)) < precision)
703 return simplify_gen_binary (LSHIFTRT, mode,
704 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
706 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
707 (ashift:QI (x:QI) C), where C is a suitable small constant and
708 the outer subreg is effectively a truncation to the original mode. */
709 if (GET_CODE (op) == ASHIFT
710 && CONST_INT_P (XEXP (op, 1))
711 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
713 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
714 && UINTVAL (XEXP (op, 1)) < precision)
715 return simplify_gen_binary (ASHIFT, mode,
716 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
718 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
719 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
720 and C2. */
721 if (GET_CODE (op) == AND
722 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
723 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
724 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
725 && CONST_INT_P (XEXP (op, 1)))
727 rtx op0 = (XEXP (XEXP (op, 0), 0));
728 rtx shift_op = XEXP (XEXP (op, 0), 1);
729 rtx mask_op = XEXP (op, 1);
730 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
731 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
733 if (shift < precision
734 /* If doing this transform works for an X with all bits set,
735 it works for any X. */
736 && ((GET_MODE_MASK (mode) >> shift) & mask)
737 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
738 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
739 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
741 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
742 return simplify_gen_binary (AND, mode, op0, mask_op);
746 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
747 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
748 changing len. */
749 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
750 && REG_P (XEXP (op, 0))
751 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
752 && CONST_INT_P (XEXP (op, 1))
753 && CONST_INT_P (XEXP (op, 2)))
755 rtx op0 = XEXP (op, 0);
756 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
757 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
758 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
760 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
761 if (op0)
763 pos -= op_precision - precision;
764 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
765 XEXP (op, 1), GEN_INT (pos));
768 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
772 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
773 XEXP (op, 1), XEXP (op, 2));
777 /* Recognize a word extraction from a multi-word subreg. */
778 if ((GET_CODE (op) == LSHIFTRT
779 || GET_CODE (op) == ASHIFTRT)
780 && SCALAR_INT_MODE_P (mode)
781 && SCALAR_INT_MODE_P (op_mode)
782 && precision >= BITS_PER_WORD
783 && 2 * precision <= op_precision
784 && CONST_INT_P (XEXP (op, 1))
785 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
786 && UINTVAL (XEXP (op, 1)) < op_precision)
788 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
789 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
790 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
791 (WORDS_BIG_ENDIAN
792 ? byte - shifted_bytes
793 : byte + shifted_bytes));
796 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
797 and try replacing the TRUNCATE and shift with it. Don't do this
798 if the MEM has a mode-dependent address. */
799 if ((GET_CODE (op) == LSHIFTRT
800 || GET_CODE (op) == ASHIFTRT)
801 && is_a <scalar_int_mode> (mode, &int_mode)
802 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
803 && MEM_P (XEXP (op, 0))
804 && CONST_INT_P (XEXP (op, 1))
805 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
806 && INTVAL (XEXP (op, 1)) > 0
807 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
808 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
809 MEM_ADDR_SPACE (XEXP (op, 0)))
810 && ! MEM_VOLATILE_P (XEXP (op, 0))
811 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
812 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
814 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
815 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
816 return adjust_address_nv (XEXP (op, 0), int_mode,
817 (WORDS_BIG_ENDIAN
818 ? byte - shifted_bytes
819 : byte + shifted_bytes));
822 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
823 (OP:SI foo:SI) if OP is NEG or ABS. */
824 if ((GET_CODE (op) == ABS
825 || GET_CODE (op) == NEG)
826 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
827 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
828 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
829 return simplify_gen_unary (GET_CODE (op), mode,
830 XEXP (XEXP (op, 0), 0), mode);
832 /* (truncate:A (subreg:B (truncate:C X) 0)) is
833 (truncate:A X). */
834 if (GET_CODE (op) == SUBREG
835 && is_a <scalar_int_mode> (mode, &int_mode)
836 && SCALAR_INT_MODE_P (op_mode)
837 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
838 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
839 && subreg_lowpart_p (op))
841 rtx inner = XEXP (SUBREG_REG (op), 0);
842 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
843 return simplify_gen_unary (TRUNCATE, int_mode, inner,
844 GET_MODE (inner));
845 else
846 /* If subreg above is paradoxical and C is narrower
847 than A, return (subreg:A (truncate:C X) 0). */
848 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
851 /* (truncate:A (truncate:B X)) is (truncate:A X). */
852 if (GET_CODE (op) == TRUNCATE)
853 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
854 GET_MODE (XEXP (op, 0)));
856 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
857 in mode A. */
858 if (GET_CODE (op) == IOR
859 && SCALAR_INT_MODE_P (mode)
860 && SCALAR_INT_MODE_P (op_mode)
861 && CONST_INT_P (XEXP (op, 1))
862 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
863 return constm1_rtx;
865 return NULL_RTX;
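/* A representative case of the distribution rule above: when
   WORD_REGISTER_OPERATIONS does not forbid it, a QImode truncation of
   (plus:SI (reg:SI A) (reg:SI B)) becomes
   (plus:QI (truncate:QI (reg:SI A)) (truncate:QI (reg:SI B))), with each
   inner TRUNCATE itself subject to further simplification.  */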
868 /* Try to simplify a unary operation CODE whose output mode is to be
869 MODE with input operand OP whose mode was originally OP_MODE.
870 Return zero if no simplification can be made. */
872 simplify_unary_operation (enum rtx_code code, machine_mode mode,
873 rtx op, machine_mode op_mode)
875 rtx trueop, tem;
877 trueop = avoid_constant_pool_reference (op);
879 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
880 if (tem)
881 return tem;
883 return simplify_unary_operation_1 (code, mode, op);
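/* For example, simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
   folds to (const_int -5) through the constant path, while a NOT of
   (not:SI X) comes back as X from simplify_unary_operation_1.  */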
886 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
887 to be exact. */
889 static bool
890 exact_int_to_float_conversion_p (const_rtx op)
892 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
893 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
894 /* Constants shouldn't reach here. */
895 gcc_assert (op0_mode != VOIDmode);
896 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
897 int in_bits = in_prec;
898 if (HWI_COMPUTABLE_MODE_P (op0_mode))
900 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
901 if (GET_CODE (op) == FLOAT)
902 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
903 else if (GET_CODE (op) == UNSIGNED_FLOAT)
904 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
905 else
906 gcc_unreachable ();
907 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
909 return in_bits <= out_bits;
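/* Concretely, on targets using the usual IEEE formats, (float:DF (reg:SI X))
   is always exact because DFmode has a 53-bit significand and SImode
   supplies at most 32 significant bits, whereas (float:SF (reg:SI X)) is
   only known to be exact when nonzero_bits/num_sign_bit_copies show that
   X fits in SFmode's 24-bit significand.  */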
912 /* Perform some simplifications we can do even if the operands
913 aren't constant. */
914 static rtx
915 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
917 enum rtx_code reversed;
918 rtx temp, elt, base, step;
919 scalar_int_mode inner, int_mode, op_mode, op0_mode;
921 switch (code)
923 case NOT:
924 /* (not (not X)) == X. */
925 if (GET_CODE (op) == NOT)
926 return XEXP (op, 0);
928 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
929 comparison is all ones. */
930 if (COMPARISON_P (op)
931 && (mode == BImode || STORE_FLAG_VALUE == -1)
932 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
933 return simplify_gen_relational (reversed, mode, VOIDmode,
934 XEXP (op, 0), XEXP (op, 1));
936 /* (not (plus X -1)) can become (neg X). */
937 if (GET_CODE (op) == PLUS
938 && XEXP (op, 1) == constm1_rtx)
939 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
941 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
942 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
943 and MODE_VECTOR_INT. */
944 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
945 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
946 CONSTM1_RTX (mode));
948 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
949 if (GET_CODE (op) == XOR
950 && CONST_INT_P (XEXP (op, 1))
951 && (temp = simplify_unary_operation (NOT, mode,
952 XEXP (op, 1), mode)) != 0)
953 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
955 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
956 if (GET_CODE (op) == PLUS
957 && CONST_INT_P (XEXP (op, 1))
958 && mode_signbit_p (mode, XEXP (op, 1))
959 && (temp = simplify_unary_operation (NOT, mode,
960 XEXP (op, 1), mode)) != 0)
961 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
964 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
965 operands other than 1, but that is not valid. We could do a
966 similar simplification for (not (lshiftrt C X)) where C is
967 just the sign bit, but this doesn't seem common enough to
968 bother with. */
969 if (GET_CODE (op) == ASHIFT
970 && XEXP (op, 0) == const1_rtx)
972 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
973 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
976 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
977 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
978 so we can perform the above simplification. */
979 if (STORE_FLAG_VALUE == -1
980 && is_a <scalar_int_mode> (mode, &int_mode)
981 && GET_CODE (op) == ASHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
984 return simplify_gen_relational (GE, int_mode, VOIDmode,
985 XEXP (op, 0), const0_rtx);
988 if (partial_subreg_p (op)
989 && subreg_lowpart_p (op)
990 && GET_CODE (SUBREG_REG (op)) == ASHIFT
991 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
993 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
994 rtx x;
996 x = gen_rtx_ROTATE (inner_mode,
997 simplify_gen_unary (NOT, inner_mode, const1_rtx,
998 inner_mode),
999 XEXP (SUBREG_REG (op), 1));
1000 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1001 if (temp)
1002 return temp;
1005 /* Apply De Morgan's laws to reduce number of patterns for machines
1006 with negating logical insns (and-not, nand, etc.). If result has
1007 only one NOT, put it first, since that is how the patterns are
1008 coded. */
1009 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1011 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1012 machine_mode op_mode;
1014 op_mode = GET_MODE (in1);
1015 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1017 op_mode = GET_MODE (in2);
1018 if (op_mode == VOIDmode)
1019 op_mode = mode;
1020 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1022 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1023 std::swap (in1, in2);
1025 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1026 mode, in1, in2);
1029 /* (not (bswap x)) -> (bswap (not x)). */
1030 if (GET_CODE (op) == BSWAP)
1032 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1033 return simplify_gen_unary (BSWAP, mode, x, mode);
1035 break;
1037 case NEG:
1038 /* (neg (neg X)) == X. */
1039 if (GET_CODE (op) == NEG)
1040 return XEXP (op, 0);
1042 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1043 If the comparison is not reversible, use
1044 x ? y : (neg y). */
1045 if (GET_CODE (op) == IF_THEN_ELSE)
1047 rtx cond = XEXP (op, 0);
1048 rtx true_rtx = XEXP (op, 1);
1049 rtx false_rtx = XEXP (op, 2);
1051 if ((GET_CODE (true_rtx) == NEG
1052 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1053 || (GET_CODE (false_rtx) == NEG
1054 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1056 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1057 temp = reversed_comparison (cond, mode);
1058 else
1060 temp = cond;
1061 std::swap (true_rtx, false_rtx);
1063 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1064 mode, temp, true_rtx, false_rtx);
1068 /* (neg (plus X 1)) can become (not X). */
1069 if (GET_CODE (op) == PLUS
1070 && XEXP (op, 1) == const1_rtx)
1071 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1073 /* Similarly, (neg (not X)) is (plus X 1). */
1074 if (GET_CODE (op) == NOT)
1075 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1076 CONST1_RTX (mode));
1078 /* (neg (minus X Y)) can become (minus Y X). This transformation
1079 isn't safe for modes with signed zeros, since if X and Y are
1080 both +0, (minus Y X) is the same as (minus X Y). If the
1081 rounding mode is towards +infinity (or -infinity) then the two
1082 expressions will be rounded differently. */
1083 if (GET_CODE (op) == MINUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1088 if (GET_CODE (op) == PLUS
1089 && !HONOR_SIGNED_ZEROS (mode)
1090 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1092 /* (neg (plus A C)) is simplified to (minus -C A). */
1093 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1094 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1096 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1097 if (temp)
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1101 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1102 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1103 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1106 /* (neg (mult A B)) becomes (mult A (neg B)).
1107 This works even for floating-point values. */
1108 if (GET_CODE (op) == MULT
1109 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1111 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1112 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1115 /* NEG commutes with ASHIFT since it is multiplication. Only do
1116 this if we can then eliminate the NEG (e.g., if the operand
1117 is a constant). */
1118 if (GET_CODE (op) == ASHIFT)
1120 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1121 if (temp)
1122 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1125 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1126 C is equal to the width of MODE minus 1. */
1127 if (GET_CODE (op) == ASHIFTRT
1128 && CONST_INT_P (XEXP (op, 1))
1129 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1130 return simplify_gen_binary (LSHIFTRT, mode,
1131 XEXP (op, 0), XEXP (op, 1));
1133 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1134 C is equal to the width of MODE minus 1. */
1135 if (GET_CODE (op) == LSHIFTRT
1136 && CONST_INT_P (XEXP (op, 1))
1137 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1138 return simplify_gen_binary (ASHIFTRT, mode,
1139 XEXP (op, 0), XEXP (op, 1));
1141 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1142 if (GET_CODE (op) == XOR
1143 && XEXP (op, 1) == const1_rtx
1144 && nonzero_bits (XEXP (op, 0), mode) == 1)
1145 return plus_constant (mode, XEXP (op, 0), -1);
1147 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1148 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1149 if (GET_CODE (op) == LT
1150 && XEXP (op, 1) == const0_rtx
1151 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1153 int_mode = as_a <scalar_int_mode> (mode);
1154 int isize = GET_MODE_PRECISION (inner);
1155 if (STORE_FLAG_VALUE == 1)
1157 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1158 gen_int_shift_amount (inner,
1159 isize - 1));
1160 if (int_mode == inner)
1161 return temp;
1162 if (GET_MODE_PRECISION (int_mode) > isize)
1163 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1164 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1166 else if (STORE_FLAG_VALUE == -1)
1168 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1169 gen_int_shift_amount (inner,
1170 isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1179 if (vec_series_p (op, &base, &step))
1181 /* Only create a new series if we can simplify both parts. In other
1182 cases this isn't really a simplification, and it's not necessarily
1183 a win to replace a vector operation with a scalar operation. */
1184 scalar_mode inner_mode = GET_MODE_INNER (mode);
1185 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1186 if (base)
1188 step = simplify_unary_operation (NEG, inner_mode,
1189 step, inner_mode);
1190 if (step)
1191 return gen_vec_series (mode, base, step);
1194 break;
1196 case TRUNCATE:
1197 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1198 with the umulXi3_highpart patterns. */
1199 if (GET_CODE (op) == LSHIFTRT
1200 && GET_CODE (XEXP (op, 0)) == MULT)
1201 break;
1203 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1205 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1207 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1208 if (temp)
1209 return temp;
1211 /* We can't handle truncation to a partial integer mode here
1212 because we don't know the real bitsize of the partial
1213 integer mode. */
1214 break;
1217 if (GET_MODE (op) != VOIDmode)
1219 temp = simplify_truncation (mode, op, GET_MODE (op));
1220 if (temp)
1221 return temp;
1224 /* If we know that the value is already truncated, we can
1225 replace the TRUNCATE with a SUBREG. */
1226 if (GET_MODE_NUNITS (mode) == 1
1227 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1228 || truncated_to_mode (mode, op)))
1230 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1231 if (temp)
1232 return temp;
1235 /* A truncate of a comparison can be replaced with a subreg if
1236 STORE_FLAG_VALUE permits. This is like the previous test,
1237 but it works even if the comparison is done in a mode larger
1238 than HOST_BITS_PER_WIDE_INT. */
1239 if (HWI_COMPUTABLE_MODE_P (mode)
1240 && COMPARISON_P (op)
1241 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1243 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1244 if (temp)
1245 return temp;
1248 /* A truncate of a memory is just loading the low part of the memory
1249 if we are not changing the meaning of the address. */
1250 if (GET_CODE (op) == MEM
1251 && !VECTOR_MODE_P (mode)
1252 && !MEM_VOLATILE_P (op)
1253 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1255 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1256 if (temp)
1257 return temp;
1260 break;
1262 case FLOAT_TRUNCATE:
1263 if (DECIMAL_FLOAT_MODE_P (mode))
1264 break;
1266 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1267 if (GET_CODE (op) == FLOAT_EXTEND
1268 && GET_MODE (XEXP (op, 0)) == mode)
1269 return XEXP (op, 0);
1271 /* (float_truncate:SF (float_truncate:DF foo:XF))
1272 = (float_truncate:SF foo:XF).
1273 This may eliminate double rounding, so it is unsafe.
1275 (float_truncate:SF (float_extend:XF foo:DF))
1276 = (float_truncate:SF foo:DF).
1278 (float_truncate:DF (float_extend:XF foo:SF))
1279 = (float_extend:DF foo:SF). */
1280 if ((GET_CODE (op) == FLOAT_TRUNCATE
1281 && flag_unsafe_math_optimizations)
1282 || GET_CODE (op) == FLOAT_EXTEND)
1283 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1284 > GET_MODE_UNIT_SIZE (mode)
1285 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1286 mode,
1287 XEXP (op, 0), mode);
1289 /* (float_truncate (float x)) is (float x) */
1290 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1291 && (flag_unsafe_math_optimizations
1292 || exact_int_to_float_conversion_p (op)))
1293 return simplify_gen_unary (GET_CODE (op), mode,
1294 XEXP (op, 0),
1295 GET_MODE (XEXP (op, 0)));
1297 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1298 (OP:SF foo:SF) if OP is NEG or ABS. */
1299 if ((GET_CODE (op) == ABS
1300 || GET_CODE (op) == NEG)
1301 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1302 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1303 return simplify_gen_unary (GET_CODE (op), mode,
1304 XEXP (XEXP (op, 0), 0), mode);
1306 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1307 is (float_truncate:SF x). */
1308 if (GET_CODE (op) == SUBREG
1309 && subreg_lowpart_p (op)
1310 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1311 return SUBREG_REG (op);
1312 break;
1314 case FLOAT_EXTEND:
1315 if (DECIMAL_FLOAT_MODE_P (mode))
1316 break;
1318 /* (float_extend (float_extend x)) is (float_extend x)
1320 (float_extend (float x)) is (float x) assuming that double
1321 rounding can't happen.  */
1323 if (GET_CODE (op) == FLOAT_EXTEND
1324 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1325 && exact_int_to_float_conversion_p (op)))
1326 return simplify_gen_unary (GET_CODE (op), mode,
1327 XEXP (op, 0),
1328 GET_MODE (XEXP (op, 0)));
1330 break;
1332 case ABS:
1333 /* (abs (neg <foo>)) -> (abs <foo>) */
1334 if (GET_CODE (op) == NEG)
1335 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1336 GET_MODE (XEXP (op, 0)));
1338 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1339 do nothing. */
1340 if (GET_MODE (op) == VOIDmode)
1341 break;
1343 /* If operand is something known to be positive, ignore the ABS. */
1344 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1345 || val_signbit_known_clear_p (GET_MODE (op),
1346 nonzero_bits (op, GET_MODE (op))))
1347 return op;
1349 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1350 if (is_a <scalar_int_mode> (mode, &int_mode)
1351 && (num_sign_bit_copies (op, int_mode)
1352 == GET_MODE_PRECISION (int_mode)))
1353 return gen_rtx_NEG (int_mode, op);
1355 break;
1357 case FFS:
1358 /* (ffs (*_extend <X>)) = (ffs <X>) */
1359 if (GET_CODE (op) == SIGN_EXTEND
1360 || GET_CODE (op) == ZERO_EXTEND)
1361 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1362 GET_MODE (XEXP (op, 0)));
1363 break;
1365 case POPCOUNT:
1366 switch (GET_CODE (op))
1368 case BSWAP:
1369 case ZERO_EXTEND:
1370 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1371 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1372 GET_MODE (XEXP (op, 0)));
1374 case ROTATE:
1375 case ROTATERT:
1376 /* Rotations don't affect popcount. */
1377 if (!side_effects_p (XEXP (op, 1)))
1378 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1379 GET_MODE (XEXP (op, 0)));
1380 break;
1382 default:
1383 break;
1385 break;
1387 case PARITY:
1388 switch (GET_CODE (op))
1390 case NOT:
1391 case BSWAP:
1392 case ZERO_EXTEND:
1393 case SIGN_EXTEND:
1394 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1397 case ROTATE:
1398 case ROTATERT:
1399 /* Rotations don't affect parity. */
1400 if (!side_effects_p (XEXP (op, 1)))
1401 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1402 GET_MODE (XEXP (op, 0)));
1403 break;
1405 default:
1406 break;
1408 break;
1410 case BSWAP:
1411 /* (bswap (bswap x)) -> x. */
1412 if (GET_CODE (op) == BSWAP)
1413 return XEXP (op, 0);
1414 break;
1416 case FLOAT:
1417 /* (float (sign_extend <X>)) = (float <X>). */
1418 if (GET_CODE (op) == SIGN_EXTEND)
1419 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1420 GET_MODE (XEXP (op, 0)));
1421 break;
1423 case SIGN_EXTEND:
1424 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1425 becomes just the MINUS if its mode is MODE. This allows
1426 folding switch statements on machines using casesi (such as
1427 the VAX). */
1428 if (GET_CODE (op) == TRUNCATE
1429 && GET_MODE (XEXP (op, 0)) == mode
1430 && GET_CODE (XEXP (op, 0)) == MINUS
1431 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1432 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1433 return XEXP (op, 0);
1435 /* Extending a widening multiplication should be canonicalized to
1436 a wider widening multiplication. */
1437 if (GET_CODE (op) == MULT)
1439 rtx lhs = XEXP (op, 0);
1440 rtx rhs = XEXP (op, 1);
1441 enum rtx_code lcode = GET_CODE (lhs);
1442 enum rtx_code rcode = GET_CODE (rhs);
1444 /* Widening multiplies usually extend both operands, but sometimes
1445 they use a shift to extract a portion of a register. */
1446 if ((lcode == SIGN_EXTEND
1447 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1448 && (rcode == SIGN_EXTEND
1449 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1451 machine_mode lmode = GET_MODE (lhs);
1452 machine_mode rmode = GET_MODE (rhs);
1453 int bits;
1455 if (lcode == ASHIFTRT)
1456 /* Number of bits not shifted off the end. */
1457 bits = (GET_MODE_UNIT_PRECISION (lmode)
1458 - INTVAL (XEXP (lhs, 1)));
1459 else /* lcode == SIGN_EXTEND */
1460 /* Size of inner mode. */
1461 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1463 if (rcode == ASHIFTRT)
1464 bits += (GET_MODE_UNIT_PRECISION (rmode)
1465 - INTVAL (XEXP (rhs, 1)));
1466 else /* rcode == SIGN_EXTEND */
1467 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1469 /* We can only widen multiplies if the result is mathematically
1470 equivalent. I.e. if overflow was impossible. */
1471 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1472 return simplify_gen_binary
1473 (MULT, mode,
1474 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1475 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1479 /* Check for a sign extension of a subreg of a promoted
1480 variable, where the promotion is sign-extended, and the
1481 target mode is the same as the variable's promotion. */
1482 if (GET_CODE (op) == SUBREG
1483 && SUBREG_PROMOTED_VAR_P (op)
1484 && SUBREG_PROMOTED_SIGNED_P (op)
1485 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1487 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1488 if (temp)
1489 return temp;
1492 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1493 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1494 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1496 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1497 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1498 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1499 GET_MODE (XEXP (op, 0)));
1502 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1503 is (sign_extend:M (subreg:O <X>)) if there is mode with
1504 GET_MODE_BITSIZE (N) - I bits.
1505 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1506 is similarly (zero_extend:M (subreg:O <X>)). */
1507 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1508 && GET_CODE (XEXP (op, 0)) == ASHIFT
1509 && is_a <scalar_int_mode> (mode, &int_mode)
1510 && CONST_INT_P (XEXP (op, 1))
1511 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1512 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1513 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1515 scalar_int_mode tmode;
1516 gcc_assert (GET_MODE_BITSIZE (int_mode)
1517 > GET_MODE_BITSIZE (op_mode));
1518 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1519 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1521 rtx inner =
1522 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1523 if (inner)
1524 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1525 ? SIGN_EXTEND : ZERO_EXTEND,
1526 int_mode, inner, tmode);
1530 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1531 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1532 if (GET_CODE (op) == LSHIFTRT
1533 && CONST_INT_P (XEXP (op, 1))
1534 && XEXP (op, 1) != const0_rtx)
1535 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1537 #if defined(POINTERS_EXTEND_UNSIGNED)
1538 /* As we do not know which address space the pointer is referring to,
1539 we can do this only if the target does not support different pointer
1540 or address modes depending on the address space. */
1541 if (target_default_pointer_address_modes_p ()
1542 && ! POINTERS_EXTEND_UNSIGNED
1543 && mode == Pmode && GET_MODE (op) == ptr_mode
1544 && (CONSTANT_P (op)
1545 || (GET_CODE (op) == SUBREG
1546 && REG_P (SUBREG_REG (op))
1547 && REG_POINTER (SUBREG_REG (op))
1548 && GET_MODE (SUBREG_REG (op)) == Pmode))
1549 && !targetm.have_ptr_extend ())
1551 temp
1552 = convert_memory_address_addr_space_1 (Pmode, op,
1553 ADDR_SPACE_GENERIC, false,
1554 true);
1555 if (temp)
1556 return temp;
1558 #endif
1559 break;
1561 case ZERO_EXTEND:
1562 /* Check for a zero extension of a subreg of a promoted
1563 variable, where the promotion is zero-extended, and the
1564 target mode is the same as the variable's promotion. */
1565 if (GET_CODE (op) == SUBREG
1566 && SUBREG_PROMOTED_VAR_P (op)
1567 && SUBREG_PROMOTED_UNSIGNED_P (op)
1568 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1570 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1571 if (temp)
1572 return temp;
1575 /* Extending a widening multiplication should be canonicalized to
1576 a wider widening multiplication. */
1577 if (GET_CODE (op) == MULT)
1579 rtx lhs = XEXP (op, 0);
1580 rtx rhs = XEXP (op, 1);
1581 enum rtx_code lcode = GET_CODE (lhs);
1582 enum rtx_code rcode = GET_CODE (rhs);
1584 /* Widening multiplies usually extend both operands, but sometimes
1585 they use a shift to extract a portion of a register. */
1586 if ((lcode == ZERO_EXTEND
1587 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1588 && (rcode == ZERO_EXTEND
1589 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1591 machine_mode lmode = GET_MODE (lhs);
1592 machine_mode rmode = GET_MODE (rhs);
1593 int bits;
1595 if (lcode == LSHIFTRT)
1596 /* Number of bits not shifted off the end. */
1597 bits = (GET_MODE_UNIT_PRECISION (lmode)
1598 - INTVAL (XEXP (lhs, 1)));
1599 else /* lcode == ZERO_EXTEND */
1600 /* Size of inner mode. */
1601 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1603 if (rcode == LSHIFTRT)
1604 bits += (GET_MODE_UNIT_PRECISION (rmode)
1605 - INTVAL (XEXP (rhs, 1)));
1606 else /* rcode == ZERO_EXTEND */
1607 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1609 /* We can only widen multiplies if the result is mathematically
1610 equivalent. I.e. if overflow was impossible. */
1611 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1612 return simplify_gen_binary
1613 (MULT, mode,
1614 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1615 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1619 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1620 if (GET_CODE (op) == ZERO_EXTEND)
1621 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1622 GET_MODE (XEXP (op, 0)));
1624 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1625 is (zero_extend:M (subreg:O <X>)) if there is mode with
1626 GET_MODE_PRECISION (N) - I bits. */
1627 if (GET_CODE (op) == LSHIFTRT
1628 && GET_CODE (XEXP (op, 0)) == ASHIFT
1629 && is_a <scalar_int_mode> (mode, &int_mode)
1630 && CONST_INT_P (XEXP (op, 1))
1631 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1632 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1633 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1635 scalar_int_mode tmode;
1636 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1637 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1639 rtx inner =
1640 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1641 if (inner)
1642 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1643 inner, tmode);
1647 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1648 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1649 of mode N. E.g.
1650 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1651 (and:SI (reg:SI) (const_int 63)). */
1652 if (partial_subreg_p (op)
1653 && is_a <scalar_int_mode> (mode, &int_mode)
1654 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1655 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1656 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1657 && subreg_lowpart_p (op)
1658 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1659 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1661 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1662 return SUBREG_REG (op);
1663 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1664 op0_mode);
1667 #if defined(POINTERS_EXTEND_UNSIGNED)
1668 /* As we do not know which address space the pointer is referring to,
1669 we can do this only if the target does not support different pointer
1670 or address modes depending on the address space. */
1671 if (target_default_pointer_address_modes_p ()
1672 && POINTERS_EXTEND_UNSIGNED > 0
1673 && mode == Pmode && GET_MODE (op) == ptr_mode
1674 && (CONSTANT_P (op)
1675 || (GET_CODE (op) == SUBREG
1676 && REG_P (SUBREG_REG (op))
1677 && REG_POINTER (SUBREG_REG (op))
1678 && GET_MODE (SUBREG_REG (op)) == Pmode))
1679 && !targetm.have_ptr_extend ())
1681 temp
1682 = convert_memory_address_addr_space_1 (Pmode, op,
1683 ADDR_SPACE_GENERIC, false,
1684 true);
1685 if (temp)
1686 return temp;
1688 #endif
1689 break;
1691 default:
1692 break;
1695 if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
1697 /* Try applying the operator to ELT and see if that simplifies.
1698 We can duplicate the result if so.
1700 The reason we don't use simplify_gen_unary is that it isn't
1701 necessarily a win to convert things like:
1703 (neg:V (vec_duplicate:V (reg:S R)))
1707 to (vec_duplicate:V (neg:S (reg:S R)))
1709 The first might be done entirely in vector registers while the
1710 second might need a move between register files. */
1711 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1712 elt, GET_MODE_INNER (GET_MODE (op)));
1713 if (temp)
1714 return gen_vec_duplicate (mode, temp);
1717 return 0;
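/* The vec_duplicate handling above means, for instance, that
   (neg:V4SI (vec_duplicate:V4SI (const_int 5))) simplifies to a duplicate
   of (const_int -5); if the scalar operation on the element does not fold,
   the expression is left untouched.  */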
1720 /* Try to compute the value of a unary operation CODE whose output mode is to
1721 be MODE with input operand OP whose mode was originally OP_MODE.
1722 Return zero if the value cannot be computed. */
1724 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1725 rtx op, machine_mode op_mode)
1727 scalar_int_mode result_mode;
1729 if (code == VEC_DUPLICATE)
1731 gcc_assert (VECTOR_MODE_P (mode));
1732 if (GET_MODE (op) != VOIDmode)
1734 if (!VECTOR_MODE_P (GET_MODE (op)))
1735 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1736 else
1737 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1738 (GET_MODE (op)));
1740 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1741 return gen_const_vec_duplicate (mode, op);
1742 if (GET_CODE (op) == CONST_VECTOR)
1744 unsigned int n_elts = GET_MODE_NUNITS (mode);
1745 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op);
1746 gcc_assert (in_n_elts < n_elts);
1747 gcc_assert ((n_elts % in_n_elts) == 0);
1748 rtvec v = rtvec_alloc (n_elts);
1749 for (unsigned i = 0; i < n_elts; i++)
1750 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1751 return gen_rtx_CONST_VECTOR (mode, v);
1755 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1757 int elt_size = GET_MODE_UNIT_SIZE (mode);
1758 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1759 machine_mode opmode = GET_MODE (op);
1760 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1761 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1762 rtvec v = rtvec_alloc (n_elts);
1763 unsigned int i;
1765 gcc_assert (op_n_elts == n_elts);
1766 for (i = 0; i < n_elts; i++)
1768 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1769 CONST_VECTOR_ELT (op, i),
1770 GET_MODE_INNER (opmode));
1771 if (!x || !valid_for_const_vector_p (mode, x))
1772 return 0;
1773 RTVEC_ELT (v, i) = x;
1775 return gen_rtx_CONST_VECTOR (mode, v);
1778 /* The order of these tests is critical so that, for example, we don't
1779 check the wrong mode (input vs. output) for a conversion operation,
1780 such as FIX. At some point, this should be simplified. */
1782 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1784 REAL_VALUE_TYPE d;
1786 if (op_mode == VOIDmode)
1788 /* A CONST_INT has VOIDmode as its mode. We assume that all
1789 the bits of the constant are significant, though this is a
1790 dangerous assumption: CONST_INTs are often created and used
1791 with garbage in the bits outside of the precision of the
1792 implied mode of the const_int. */
1793 op_mode = MAX_MODE_INT;
1796 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1798 /* Avoid the folding if flag_signaling_nans is on and
1799 the operand is a signaling NaN. */
1800 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1801 return 0;
1803 d = real_value_truncate (mode, d);
1804 return const_double_from_real_value (d, mode);
1806 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1808 REAL_VALUE_TYPE d;
1810 if (op_mode == VOIDmode)
1812 /* A CONST_INT has VOIDmode as its mode. We assume that all
1813 the bits of the constant are significant, though this is a
1814 dangerous assumption: CONST_INTs are often created and used
1815 with garbage in the bits outside of the precision of the
1816 implied mode of the const_int. */
1817 op_mode = MAX_MODE_INT;
1820 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1822 /* Avoid the folding if flag_signaling_nans is on and
1823 the operand is a signaling NaN. */
1824 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1825 return 0;
1827 d = real_value_truncate (mode, d);
1828 return const_double_from_real_value (d, mode);
1831 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1833 unsigned int width = GET_MODE_PRECISION (result_mode);
1834 wide_int result;
1835 scalar_int_mode imode = (op_mode == VOIDmode
1836 ? result_mode
1837 : as_a <scalar_int_mode> (op_mode));
1838 rtx_mode_t op0 = rtx_mode_t (op, imode);
1839 int int_value;
1841 #if TARGET_SUPPORTS_WIDE_INT == 0
1842 /* This assert keeps the simplification from producing a result
1843 that cannot be represented in a CONST_DOUBLE but a lot of
1844 upstream callers expect that this function never fails to
1845 simplify something, so if you added this to the test
1846 above, the code would die later anyway. If this assert
1847 happens, you just need to make the port support wide int. */
1848 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1849 #endif
1851 switch (code)
1853 case NOT:
1854 result = wi::bit_not (op0);
1855 break;
1857 case NEG:
1858 result = wi::neg (op0);
1859 break;
1861 case ABS:
1862 result = wi::abs (op0);
1863 break;
1865 case FFS:
1866 result = wi::shwi (wi::ffs (op0), result_mode);
1867 break;
1869 case CLZ:
1870 if (wi::ne_p (op0, 0))
1871 int_value = wi::clz (op0);
1872 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1873 int_value = GET_MODE_PRECISION (imode);
1874 result = wi::shwi (int_value, result_mode);
1875 break;
1877 case CLRSB:
1878 result = wi::shwi (wi::clrsb (op0), result_mode);
1879 break;
1881 case CTZ:
1882 if (wi::ne_p (op0, 0))
1883 int_value = wi::ctz (op0);
1884 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1885 int_value = GET_MODE_PRECISION (imode);
1886 result = wi::shwi (int_value, result_mode);
1887 break;
1889 case POPCOUNT:
1890 result = wi::shwi (wi::popcount (op0), result_mode);
1891 break;
1893 case PARITY:
1894 result = wi::shwi (wi::parity (op0), result_mode);
1895 break;
1897 case BSWAP:
1898 result = wide_int (op0).bswap ();
1899 break;
1901 case TRUNCATE:
1902 case ZERO_EXTEND:
1903 result = wide_int::from (op0, width, UNSIGNED);
1904 break;
1906 case SIGN_EXTEND:
1907 result = wide_int::from (op0, width, SIGNED);
1908 break;
1910 case SQRT:
1911 default:
1912 return 0;
1915 return immed_wide_int_const (result, result_mode);
1918 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1919 && SCALAR_FLOAT_MODE_P (mode)
1920 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1922 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1923 switch (code)
1925 case SQRT:
1926 return 0;
1927 case ABS:
1928 d = real_value_abs (&d);
1929 break;
1930 case NEG:
1931 d = real_value_negate (&d);
1932 break;
1933 case FLOAT_TRUNCATE:
1934 /* Don't perform the operation if flag_signaling_nans is on
1935 and the operand is a signaling NaN. */
1936 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1937 return NULL_RTX;
1938 d = real_value_truncate (mode, d);
1939 break;
1940 case FLOAT_EXTEND:
1941 /* Don't perform the operation if flag_signaling_nans is on
1942 and the operand is a signaling NaN. */
1943 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1944 return NULL_RTX;
1945 /* All this does is change the mode, unless changing
1946 mode class. */
1947 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1948 real_convert (&d, mode, &d);
1949 break;
1950 case FIX:
1951 /* Don't perform the operation if flag_signaling_nans is on
1952 and the operand is a signaling NaN. */
1953 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1954 return NULL_RTX;
1955 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1956 break;
1957 case NOT:
1959 long tmp[4];
1960 int i;
1962 real_to_target (tmp, &d, GET_MODE (op));
1963 for (i = 0; i < 4; i++)
1964 tmp[i] = ~tmp[i];
1965 real_from_target (&d, tmp, mode);
1966 break;
1968 default:
1969 gcc_unreachable ();
1971 return const_double_from_real_value (d, mode);
1973 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1974 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1975 && is_int_mode (mode, &result_mode))
1977 unsigned int width = GET_MODE_PRECISION (result_mode);
1978 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1979 operators are intentionally left unspecified (to ease implementation
1980 by target backends), for consistency, this routine implements the
1981 same semantics for constant folding as used by the middle-end. */
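/* For example, folding (fix:SI X) where X is a float constant larger
than 2147483647 yields the saturated value 0x7fffffff, and a NaN
operand folds to zero, as implemented below. */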
1983 /* This was formerly used only for non-IEEE float.
1984 eggert@twinsun.com says it is safe for IEEE also. */
1985 REAL_VALUE_TYPE t;
1986 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1987 wide_int wmax, wmin;
1988 /* This is part of the ABI of real_to_integer, but we check
1989 things before making this call. */
1990 bool fail;
1992 switch (code)
1994 case FIX:
1995 if (REAL_VALUE_ISNAN (*x))
1996 return const0_rtx;
1998 /* Test against the signed upper bound. */
1999 wmax = wi::max_value (width, SIGNED);
2000 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2001 if (real_less (&t, x))
2002 return immed_wide_int_const (wmax, mode);
2004 /* Test against the signed lower bound. */
2005 wmin = wi::min_value (width, SIGNED);
2006 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2007 if (real_less (x, &t))
2008 return immed_wide_int_const (wmin, mode);
2010 return immed_wide_int_const (real_to_integer (x, &fail, width),
2011 mode);
2013 case UNSIGNED_FIX:
2014 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2015 return const0_rtx;
2017 /* Test against the unsigned upper bound. */
2018 wmax = wi::max_value (width, UNSIGNED);
2019 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2020 if (real_less (&t, x))
2021 return immed_wide_int_const (wmax, mode);
2023 return immed_wide_int_const (real_to_integer (x, &fail, width),
2024 mode);
2026 default:
2027 gcc_unreachable ();
2031 /* Handle polynomial integers. */
2032 else if (CONST_POLY_INT_P (op))
2034 poly_wide_int result;
2035 switch (code)
2037 case NEG:
2038 result = -const_poly_int_value (op);
2039 break;
2041 case NOT:
2042 result = ~const_poly_int_value (op);
2043 break;
2045 default:
2046 return NULL_RTX;
2048 return immed_wide_int_const (result, mode);
2051 return NULL_RTX;
2054 /* Subroutine of simplify_binary_operation to simplify a binary operation
2055 CODE that can commute with byte swapping, with result mode MODE and
2056 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2057 Return zero if no simplification or canonicalization is possible. */
2059 static rtx
2060 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2061 rtx op0, rtx op1)
2063 rtx tem;
2065 /* (op (bswap x) C1) -> (bswap (op x C2)) with C2 swapped. */
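/* For example, in SImode (and (bswap X) (const_int 0xff)) becomes
(bswap (and X (const_int 0xff000000))); byte-swapping the constant
means the same bits of X survive the AND. */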
2066 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2068 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2069 simplify_gen_unary (BSWAP, mode, op1, mode));
2070 return simplify_gen_unary (BSWAP, mode, tem, mode);
2073 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2074 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2076 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2077 return simplify_gen_unary (BSWAP, mode, tem, mode);
2080 return NULL_RTX;
2083 /* Subroutine of simplify_binary_operation to simplify a commutative,
2084 associative binary operation CODE with result mode MODE, operating
2085 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2086 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2087 canonicalization is possible. */
2089 static rtx
2090 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2091 rtx op0, rtx op1)
2093 rtx tem;
2095 /* Linearize the operator to the left. */
2096 if (GET_CODE (op1) == code)
2098 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2099 if (GET_CODE (op0) == code)
2101 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2102 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2105 /* "a op (b op c)" becomes "(b op c) op a". */
2106 if (! swap_commutative_operands_p (op1, op0))
2107 return simplify_gen_binary (code, mode, op1, op0);
2109 std::swap (op0, op1);
2112 if (GET_CODE (op0) == code)
2114 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2115 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2117 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2118 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2121 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2122 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2123 if (tem != 0)
2124 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2126 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2127 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2128 if (tem != 0)
2129 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2132 return 0;
2136 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2137 and OP1. Return 0 if no simplification is possible.
2139 Don't use this for relational operations such as EQ or LT.
2140 Use simplify_relational_operation instead. */
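/* For instance, simplify_binary_operation (PLUS, SImode, X, const0_rtx)
returns X itself, while a call for which no rule below applies
returns NULL_RTX and the caller keeps the original expression. */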
2142 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2143 rtx op0, rtx op1)
2145 rtx trueop0, trueop1;
2146 rtx tem;
2148 /* Relational operations don't work here. We must know the mode
2149 of the operands in order to do the comparison correctly.
2150 Assuming a full word can give incorrect results.
2151 Consider comparing 128 with -128 in QImode. */
2152 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2153 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2155 /* Make sure the constant is second. */
2156 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2157 && swap_commutative_operands_p (op0, op1))
2158 std::swap (op0, op1);
2160 trueop0 = avoid_constant_pool_reference (op0);
2161 trueop1 = avoid_constant_pool_reference (op1);
2163 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2164 if (tem)
2165 return tem;
2166 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2168 if (tem)
2169 return tem;
2171 /* If the above steps did not result in a simplification and op0 or op1
2172 were constant pool references, use the referenced constants directly. */
2173 if (trueop0 != op0 || trueop1 != op1)
2174 return simplify_gen_binary (code, mode, trueop0, trueop1);
2176 return NULL_RTX;
2179 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2180 which OP0 and OP1 are both vector series or vector duplicates
2181 (which are really just series with a step of 0). If so, try to
2182 form a new series by applying CODE to the bases and to the steps.
2183 Return null if no simplification is possible.
2185 MODE is the mode of the operation and is known to be a vector
2186 integer mode. */
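/* Element I of (vec_series B S) is B + I * S, so for PLUS, for example,
(B0 + I*S0) + (B1 + I*S1) == (B0 + B1) + I*(S0 + S1); the same
reasoning applies to MINUS. */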
2188 static rtx
2189 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2190 rtx op0, rtx op1)
2192 rtx base0, step0;
2193 if (vec_duplicate_p (op0, &base0))
2194 step0 = const0_rtx;
2195 else if (!vec_series_p (op0, &base0, &step0))
2196 return NULL_RTX;
2198 rtx base1, step1;
2199 if (vec_duplicate_p (op1, &base1))
2200 step1 = const0_rtx;
2201 else if (!vec_series_p (op1, &base1, &step1))
2202 return NULL_RTX;
2204 /* Only create a new series if we can simplify both parts. In other
2205 cases this isn't really a simplification, and it's not necessarily
2206 a win to replace a vector operation with a scalar operation. */
2207 scalar_mode inner_mode = GET_MODE_INNER (mode);
2208 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2209 if (!new_base)
2210 return NULL_RTX;
2212 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2213 if (!new_step)
2214 return NULL_RTX;
2216 return gen_vec_series (mode, new_base, new_step);
2219 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2220 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2221 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2222 actual constants. */
2224 static rtx
2225 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2226 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2228 rtx tem, reversed, opleft, opright, elt0, elt1;
2229 HOST_WIDE_INT val;
2230 scalar_int_mode int_mode, inner_mode;
2231 poly_int64 offset;
2233 /* Even if we can't compute a constant result,
2234 there are some cases worth simplifying. */
2236 switch (code)
2238 case PLUS:
2239 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2240 when x is NaN, infinite, or finite and nonzero. They aren't
2241 when x is -0 and the rounding mode is not towards -infinity,
2242 since (-0) + 0 is then 0. */
2243 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2244 return op0;
2246 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2247 transformations are safe even for IEEE. */
2248 if (GET_CODE (op0) == NEG)
2249 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2250 else if (GET_CODE (op1) == NEG)
2251 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2253 /* (~a) + 1 -> -a */
2254 if (INTEGRAL_MODE_P (mode)
2255 && GET_CODE (op0) == NOT
2256 && trueop1 == const1_rtx)
2257 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2259 /* Handle both-operands-constant cases. We can only add
2260 CONST_INTs to constants since the sum of relocatable symbols
2261 can't be handled by most assemblers. Don't add CONST_INT
2262 to CONST_INT since overflow won't be computed properly if wider
2263 than HOST_BITS_PER_WIDE_INT. */
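/* For example, (plus (symbol_ref "foo") (const_int 4)) is folded by
plus_constant into the canonical
(const (plus (symbol_ref "foo") (const_int 4))) form. */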
2265 if ((GET_CODE (op0) == CONST
2266 || GET_CODE (op0) == SYMBOL_REF
2267 || GET_CODE (op0) == LABEL_REF)
2268 && CONST_INT_P (op1))
2269 return plus_constant (mode, op0, INTVAL (op1));
2270 else if ((GET_CODE (op1) == CONST
2271 || GET_CODE (op1) == SYMBOL_REF
2272 || GET_CODE (op1) == LABEL_REF)
2273 && CONST_INT_P (op0))
2274 return plus_constant (mode, op1, INTVAL (op0));
2276 /* See if this is something like X * C - X or vice versa or
2277 if the multiplication is written as a shift. If so, we can
2278 distribute and make a new multiply, shift, or maybe just
2279 have X (if C is 2 in the example above). But don't make
2280 something more expensive than we had before. */
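/* For example, (plus (mult X (const_int 3)) X) can become
(mult X (const_int 4)), and (plus (ashift X (const_int 2)) X) can
become (mult X (const_int 5)), provided set_src_cost says the new
form is no more expensive than the original. */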
2282 if (is_a <scalar_int_mode> (mode, &int_mode))
2284 rtx lhs = op0, rhs = op1;
2286 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2287 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2289 if (GET_CODE (lhs) == NEG)
2291 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2292 lhs = XEXP (lhs, 0);
2294 else if (GET_CODE (lhs) == MULT
2295 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2297 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2298 lhs = XEXP (lhs, 0);
2300 else if (GET_CODE (lhs) == ASHIFT
2301 && CONST_INT_P (XEXP (lhs, 1))
2302 && INTVAL (XEXP (lhs, 1)) >= 0
2303 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2305 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2306 GET_MODE_PRECISION (int_mode));
2307 lhs = XEXP (lhs, 0);
2310 if (GET_CODE (rhs) == NEG)
2312 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2313 rhs = XEXP (rhs, 0);
2315 else if (GET_CODE (rhs) == MULT
2316 && CONST_INT_P (XEXP (rhs, 1)))
2318 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2319 rhs = XEXP (rhs, 0);
2321 else if (GET_CODE (rhs) == ASHIFT
2322 && CONST_INT_P (XEXP (rhs, 1))
2323 && INTVAL (XEXP (rhs, 1)) >= 0
2324 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2326 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2327 GET_MODE_PRECISION (int_mode));
2328 rhs = XEXP (rhs, 0);
2331 if (rtx_equal_p (lhs, rhs))
2333 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2334 rtx coeff;
2335 bool speed = optimize_function_for_speed_p (cfun);
2337 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2339 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2340 return (set_src_cost (tem, int_mode, speed)
2341 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2345 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2346 if (CONST_SCALAR_INT_P (op1)
2347 && GET_CODE (op0) == XOR
2348 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2349 && mode_signbit_p (mode, op1))
2350 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2351 simplify_gen_binary (XOR, mode, op1,
2352 XEXP (op0, 1)));
2354 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2355 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2356 && GET_CODE (op0) == MULT
2357 && GET_CODE (XEXP (op0, 0)) == NEG)
2359 rtx in1, in2;
2361 in1 = XEXP (XEXP (op0, 0), 0);
2362 in2 = XEXP (op0, 1);
2363 return simplify_gen_binary (MINUS, mode, op1,
2364 simplify_gen_binary (MULT, mode,
2365 in1, in2));
2368 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2369 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2370 is 1. */
2371 if (COMPARISON_P (op0)
2372 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2373 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2374 && (reversed = reversed_comparison (op0, mode)))
2375 return
2376 simplify_gen_unary (NEG, mode, reversed, mode);
2378 /* If one of the operands is a PLUS or a MINUS, see if we can
2379 simplify this by the associative law.
2380 Don't use the associative law for floating point.
2381 The inaccuracy makes it nonassociative,
2382 and subtle programs can break if operations are associated. */
2384 if (INTEGRAL_MODE_P (mode)
2385 && (plus_minus_operand_p (op0)
2386 || plus_minus_operand_p (op1))
2387 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2388 return tem;
2390 /* Reassociate floating point addition only when the user
2391 specifies associative math operations. */
2392 if (FLOAT_MODE_P (mode)
2393 && flag_associative_math)
2395 tem = simplify_associative_operation (code, mode, op0, op1);
2396 if (tem)
2397 return tem;
2400 /* Handle vector series. */
2401 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2403 tem = simplify_binary_operation_series (code, mode, op0, op1);
2404 if (tem)
2405 return tem;
2407 break;
2409 case COMPARE:
2410 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2411 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2412 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2413 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2415 rtx xop00 = XEXP (op0, 0);
2416 rtx xop10 = XEXP (op1, 0);
2418 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2419 return xop00;
2421 if (REG_P (xop00) && REG_P (xop10)
2422 && REGNO (xop00) == REGNO (xop10)
2423 && GET_MODE (xop00) == mode
2424 && GET_MODE (xop10) == mode
2425 && GET_MODE_CLASS (mode) == MODE_CC)
2426 return xop00;
2428 break;
2430 case MINUS:
2431 /* We can't assume x-x is 0 even with non-IEEE floating point,
2432 but since it is zero except in very strange circumstances, we
2433 will treat it as zero with -ffinite-math-only. */
2434 if (rtx_equal_p (trueop0, trueop1)
2435 && ! side_effects_p (op0)
2436 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2437 return CONST0_RTX (mode);
2439 /* Change subtraction from zero into negation. (0 - x) is the
2440 same as -x when x is NaN, infinite, or finite and nonzero.
2441 But if the mode has signed zeros, and does not round towards
2442 -infinity, then 0 - 0 is 0, not -0. */
2443 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2444 return simplify_gen_unary (NEG, mode, op1, mode);
2446 /* (-1 - a) is ~a, unless the expression contains symbolic
2447 constants, in which case not retaining additions and
2448 subtractions could cause invalid assembly to be produced. */
2449 if (trueop0 == constm1_rtx
2450 && !contains_symbolic_reference_p (op1))
2451 return simplify_gen_unary (NOT, mode, op1, mode);
2453 /* Subtracting 0 has no effect unless the mode has signed zeros
2454 and supports rounding towards -infinity. In such a case,
2455 0 - 0 is -0. */
2456 if (!(HONOR_SIGNED_ZEROS (mode)
2457 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2458 && trueop1 == CONST0_RTX (mode))
2459 return op0;
2461 /* See if this is something like X * C - X or vice versa or
2462 if the multiplication is written as a shift. If so, we can
2463 distribute and make a new multiply, shift, or maybe just
2464 have X (if C is 2 in the example above). But don't make
2465 something more expensive than we had before. */
2467 if (is_a <scalar_int_mode> (mode, &int_mode))
2469 rtx lhs = op0, rhs = op1;
2471 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2472 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2474 if (GET_CODE (lhs) == NEG)
2476 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2477 lhs = XEXP (lhs, 0);
2479 else if (GET_CODE (lhs) == MULT
2480 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2482 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2483 lhs = XEXP (lhs, 0);
2485 else if (GET_CODE (lhs) == ASHIFT
2486 && CONST_INT_P (XEXP (lhs, 1))
2487 && INTVAL (XEXP (lhs, 1)) >= 0
2488 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2490 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2491 GET_MODE_PRECISION (int_mode));
2492 lhs = XEXP (lhs, 0);
2495 if (GET_CODE (rhs) == NEG)
2497 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2498 rhs = XEXP (rhs, 0);
2500 else if (GET_CODE (rhs) == MULT
2501 && CONST_INT_P (XEXP (rhs, 1)))
2503 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2504 rhs = XEXP (rhs, 0);
2506 else if (GET_CODE (rhs) == ASHIFT
2507 && CONST_INT_P (XEXP (rhs, 1))
2508 && INTVAL (XEXP (rhs, 1)) >= 0
2509 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2511 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2512 GET_MODE_PRECISION (int_mode));
2513 negcoeff1 = -negcoeff1;
2514 rhs = XEXP (rhs, 0);
2517 if (rtx_equal_p (lhs, rhs))
2519 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2520 rtx coeff;
2521 bool speed = optimize_function_for_speed_p (cfun);
2523 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2525 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2526 return (set_src_cost (tem, int_mode, speed)
2527 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2531 /* (a - (-b)) -> (a + b). True even for IEEE. */
2532 if (GET_CODE (op1) == NEG)
2533 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2535 /* (-x - c) may be simplified as (-c - x). */
2536 if (GET_CODE (op0) == NEG
2537 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2539 tem = simplify_unary_operation (NEG, mode, op1, mode);
2540 if (tem)
2541 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2544 if ((GET_CODE (op0) == CONST
2545 || GET_CODE (op0) == SYMBOL_REF
2546 || GET_CODE (op0) == LABEL_REF)
2547 && poly_int_rtx_p (op1, &offset))
2548 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2550 /* Don't let a relocatable value get a negative coeff. */
2551 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2552 return simplify_gen_binary (PLUS, mode,
2553 op0,
2554 neg_const_int (mode, op1));
2556 /* (x - (x & y)) -> (x & ~y) */
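/* Every bit set in (x & y) is also set in x, so the subtraction
never borrows and simply clears those bits, which is x & ~y. */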
2557 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2559 if (rtx_equal_p (op0, XEXP (op1, 0)))
2561 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2562 GET_MODE (XEXP (op1, 1)));
2563 return simplify_gen_binary (AND, mode, op0, tem);
2565 if (rtx_equal_p (op0, XEXP (op1, 1)))
2567 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2568 GET_MODE (XEXP (op1, 0)));
2569 return simplify_gen_binary (AND, mode, op0, tem);
2573 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2574 by reversing the comparison code if valid. */
2575 if (STORE_FLAG_VALUE == 1
2576 && trueop0 == const1_rtx
2577 && COMPARISON_P (op1)
2578 && (reversed = reversed_comparison (op1, mode)))
2579 return reversed;
2581 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2582 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2583 && GET_CODE (op1) == MULT
2584 && GET_CODE (XEXP (op1, 0)) == NEG)
2586 rtx in1, in2;
2588 in1 = XEXP (XEXP (op1, 0), 0);
2589 in2 = XEXP (op1, 1);
2590 return simplify_gen_binary (PLUS, mode,
2591 simplify_gen_binary (MULT, mode,
2592 in1, in2),
2593 op0);
2596 /* Canonicalize (minus (neg A) (mult B C)) to
2597 (minus (mult (neg B) C) A). */
2598 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2599 && GET_CODE (op1) == MULT
2600 && GET_CODE (op0) == NEG)
2602 rtx in1, in2;
2604 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2605 in2 = XEXP (op1, 1);
2606 return simplify_gen_binary (MINUS, mode,
2607 simplify_gen_binary (MULT, mode,
2608 in1, in2),
2609 XEXP (op0, 0));
2612 /* If one of the operands is a PLUS or a MINUS, see if we can
2613 simplify this by the associative law. This will, for example,
2614 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2615 Don't use the associative law for floating point.
2616 The inaccuracy makes it nonassociative,
2617 and subtle programs can break if operations are associated. */
2619 if (INTEGRAL_MODE_P (mode)
2620 && (plus_minus_operand_p (op0)
2621 || plus_minus_operand_p (op1))
2622 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2623 return tem;
2625 /* Handle vector series. */
2626 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2628 tem = simplify_binary_operation_series (code, mode, op0, op1);
2629 if (tem)
2630 return tem;
2632 break;
2634 case MULT:
2635 if (trueop1 == constm1_rtx)
2636 return simplify_gen_unary (NEG, mode, op0, mode);
2638 if (GET_CODE (op0) == NEG)
2640 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2641 /* If op1 is a MULT as well and simplify_unary_operation
2642 just moved the NEG to the second operand, simplify_gen_binary
2643 below could, through simplify_associative_operation, move
2644 the NEG around again and recurse endlessly. */
2645 if (temp
2646 && GET_CODE (op1) == MULT
2647 && GET_CODE (temp) == MULT
2648 && XEXP (op1, 0) == XEXP (temp, 0)
2649 && GET_CODE (XEXP (temp, 1)) == NEG
2650 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2651 temp = NULL_RTX;
2652 if (temp)
2653 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2655 if (GET_CODE (op1) == NEG)
2657 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2658 /* If op0 is a MULT as well and simplify_unary_operation
2659 just moved the NEG to the second operand, simplify_gen_binary
2660 below could, through simplify_associative_operation, move
2661 the NEG around again and recurse endlessly. */
2662 if (temp
2663 && GET_CODE (op0) == MULT
2664 && GET_CODE (temp) == MULT
2665 && XEXP (op0, 0) == XEXP (temp, 0)
2666 && GET_CODE (XEXP (temp, 1)) == NEG
2667 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2668 temp = NULL_RTX;
2669 if (temp)
2670 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2673 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2674 x is NaN, since x * 0 is then also NaN. Nor is it valid
2675 when the mode has signed zeros, since multiplying a negative
2676 number by 0 will give -0, not 0. */
2677 if (!HONOR_NANS (mode)
2678 && !HONOR_SIGNED_ZEROS (mode)
2679 && trueop1 == CONST0_RTX (mode)
2680 && ! side_effects_p (op0))
2681 return op1;
2683 /* In IEEE floating point, x*1 is not equivalent to x for
2684 signalling NaNs. */
2685 if (!HONOR_SNANS (mode)
2686 && trueop1 == CONST1_RTX (mode))
2687 return op0;
2689 /* Convert multiply by constant power of two into shift. */
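/* For example, (mult X (const_int 8)) becomes
(ashift X (const_int 3)). */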
2690 if (CONST_SCALAR_INT_P (trueop1))
2692 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2693 if (val >= 0)
2694 return simplify_gen_binary (ASHIFT, mode, op0,
2695 gen_int_shift_amount (mode, val));
2698 /* x*2 is x+x and x*(-1) is -x */
2699 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2700 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2701 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2702 && GET_MODE (op0) == mode)
2704 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2706 if (real_equal (d1, &dconst2))
2707 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2709 if (!HONOR_SNANS (mode)
2710 && real_equal (d1, &dconstm1))
2711 return simplify_gen_unary (NEG, mode, op0, mode);
2714 /* Optimize -x * -x as x * x. */
2715 if (FLOAT_MODE_P (mode)
2716 && GET_CODE (op0) == NEG
2717 && GET_CODE (op1) == NEG
2718 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2719 && !side_effects_p (XEXP (op0, 0)))
2720 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2722 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2723 if (SCALAR_FLOAT_MODE_P (mode)
2724 && GET_CODE (op0) == ABS
2725 && GET_CODE (op1) == ABS
2726 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2727 && !side_effects_p (XEXP (op0, 0)))
2728 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2730 /* Reassociate multiplication, but for floating point MULTs
2731 only when the user specifies unsafe math optimizations. */
2732 if (! FLOAT_MODE_P (mode)
2733 || flag_unsafe_math_optimizations)
2735 tem = simplify_associative_operation (code, mode, op0, op1);
2736 if (tem)
2737 return tem;
2739 break;
2741 case IOR:
2742 if (trueop1 == CONST0_RTX (mode))
2743 return op0;
2744 if (INTEGRAL_MODE_P (mode)
2745 && trueop1 == CONSTM1_RTX (mode)
2746 && !side_effects_p (op0))
2747 return op1;
2748 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2749 return op0;
2750 /* A | (~A) -> -1 */
2751 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2752 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2753 && ! side_effects_p (op0)
2754 && SCALAR_INT_MODE_P (mode))
2755 return constm1_rtx;
2757 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2758 if (CONST_INT_P (op1)
2759 && HWI_COMPUTABLE_MODE_P (mode)
2760 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2761 && !side_effects_p (op0))
2762 return op1;
2764 /* Canonicalize (X & C1) | C2. */
2765 if (GET_CODE (op0) == AND
2766 && CONST_INT_P (trueop1)
2767 && CONST_INT_P (XEXP (op0, 1)))
2769 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2770 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2771 HOST_WIDE_INT c2 = INTVAL (trueop1);
2773 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2774 if ((c1 & c2) == c1
2775 && !side_effects_p (XEXP (op0, 0)))
2776 return trueop1;
2778 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2779 if (((c1|c2) & mask) == mask)
2780 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2783 /* Convert (A & B) | A to A. */
2784 if (GET_CODE (op0) == AND
2785 && (rtx_equal_p (XEXP (op0, 0), op1)
2786 || rtx_equal_p (XEXP (op0, 1), op1))
2787 && ! side_effects_p (XEXP (op0, 0))
2788 && ! side_effects_p (XEXP (op0, 1)))
2789 return op1;
2791 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2792 mode size to (rotate A CX). */
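/* For example, in SImode (ior (ashift A (const_int 8))
(lshiftrt A (const_int 24))) becomes (rotate A (const_int 8)). */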
2794 if (GET_CODE (op1) == ASHIFT
2795 || GET_CODE (op1) == SUBREG)
2797 opleft = op1;
2798 opright = op0;
2800 else
2802 opright = op1;
2803 opleft = op0;
2806 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2807 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2808 && CONST_INT_P (XEXP (opleft, 1))
2809 && CONST_INT_P (XEXP (opright, 1))
2810 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2811 == GET_MODE_UNIT_PRECISION (mode)))
2812 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2814 /* Same, but for ashift that has been "simplified" to a wider mode
2815 by simplify_shift_const. */
2817 if (GET_CODE (opleft) == SUBREG
2818 && is_a <scalar_int_mode> (mode, &int_mode)
2819 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2820 &inner_mode)
2821 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2822 && GET_CODE (opright) == LSHIFTRT
2823 && GET_CODE (XEXP (opright, 0)) == SUBREG
2824 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2825 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2826 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2827 SUBREG_REG (XEXP (opright, 0)))
2828 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2829 && CONST_INT_P (XEXP (opright, 1))
2830 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2831 + INTVAL (XEXP (opright, 1))
2832 == GET_MODE_PRECISION (int_mode)))
2833 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2834 XEXP (SUBREG_REG (opleft), 1));
2836 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2837 a (sign_extend (plus ...)). If OP1 is a CONST_INT and the PLUS
2838 does not affect any of the bits in OP1, we can do the IOR as a
2839 PLUS and we can associate. This is valid if OP1 can be safely
2840 shifted left C bits. */
2841 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2842 && GET_CODE (XEXP (op0, 0)) == PLUS
2843 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2844 && CONST_INT_P (XEXP (op0, 1))
2845 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2847 int count = INTVAL (XEXP (op0, 1));
2848 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2850 if (mask >> count == INTVAL (trueop1)
2851 && trunc_int_for_mode (mask, mode) == mask
2852 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2853 return simplify_gen_binary (ASHIFTRT, mode,
2854 plus_constant (mode, XEXP (op0, 0),
2855 mask),
2856 XEXP (op0, 1));
2859 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2860 if (tem)
2861 return tem;
2863 tem = simplify_associative_operation (code, mode, op0, op1);
2864 if (tem)
2865 return tem;
2866 break;
2868 case XOR:
2869 if (trueop1 == CONST0_RTX (mode))
2870 return op0;
2871 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2872 return simplify_gen_unary (NOT, mode, op0, mode);
2873 if (rtx_equal_p (trueop0, trueop1)
2874 && ! side_effects_p (op0)
2875 && GET_MODE_CLASS (mode) != MODE_CC)
2876 return CONST0_RTX (mode);
2878 /* Canonicalize XOR of the most significant bit to PLUS. */
2879 if (CONST_SCALAR_INT_P (op1)
2880 && mode_signbit_p (mode, op1))
2881 return simplify_gen_binary (PLUS, mode, op0, op1);
2882 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2883 if (CONST_SCALAR_INT_P (op1)
2884 && GET_CODE (op0) == PLUS
2885 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2886 && mode_signbit_p (mode, XEXP (op0, 1)))
2887 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2888 simplify_gen_binary (XOR, mode, op1,
2889 XEXP (op0, 1)));
2891 /* If we are XORing two things that have no bits in common,
2892 convert them into an IOR. This helps to detect rotation encoded
2893 using those methods and possibly other simplifications. */
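/* When nonzero_bits shows no position where both operands can be 1,
XOR and IOR compute the same value, so the IOR form is preferred. */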
2895 if (HWI_COMPUTABLE_MODE_P (mode)
2896 && (nonzero_bits (op0, mode)
2897 & nonzero_bits (op1, mode)) == 0)
2898 return (simplify_gen_binary (IOR, mode, op0, op1));
2900 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2901 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2902 (NOT y). */
2904 int num_negated = 0;
2906 if (GET_CODE (op0) == NOT)
2907 num_negated++, op0 = XEXP (op0, 0);
2908 if (GET_CODE (op1) == NOT)
2909 num_negated++, op1 = XEXP (op1, 0);
2911 if (num_negated == 2)
2912 return simplify_gen_binary (XOR, mode, op0, op1);
2913 else if (num_negated == 1)
2914 return simplify_gen_unary (NOT, mode,
2915 simplify_gen_binary (XOR, mode, op0, op1),
2916 mode);
2919 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2920 correspond to a machine insn or result in further simplifications
2921 if B is a constant. */
2923 if (GET_CODE (op0) == AND
2924 && rtx_equal_p (XEXP (op0, 1), op1)
2925 && ! side_effects_p (op1))
2926 return simplify_gen_binary (AND, mode,
2927 simplify_gen_unary (NOT, mode,
2928 XEXP (op0, 0), mode),
2929 op1);
2931 else if (GET_CODE (op0) == AND
2932 && rtx_equal_p (XEXP (op0, 0), op1)
2933 && ! side_effects_p (op1))
2934 return simplify_gen_binary (AND, mode,
2935 simplify_gen_unary (NOT, mode,
2936 XEXP (op0, 1), mode),
2937 op1);
2939 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2940 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2941 out bits inverted twice and not set by C. Similarly, given
2942 (xor (and (xor A B) C) D), simplify without inverting C in
2943 the xor operand: (xor (and A C) (B&C)^D). */
2945 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2946 && GET_CODE (XEXP (op0, 0)) == XOR
2947 && CONST_INT_P (op1)
2948 && CONST_INT_P (XEXP (op0, 1))
2949 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2951 enum rtx_code op = GET_CODE (op0);
2952 rtx a = XEXP (XEXP (op0, 0), 0);
2953 rtx b = XEXP (XEXP (op0, 0), 1);
2954 rtx c = XEXP (op0, 1);
2955 rtx d = op1;
2956 HOST_WIDE_INT bval = INTVAL (b);
2957 HOST_WIDE_INT cval = INTVAL (c);
2958 HOST_WIDE_INT dval = INTVAL (d);
2959 HOST_WIDE_INT xcval;
2961 if (op == IOR)
2962 xcval = ~cval;
2963 else
2964 xcval = cval;
2966 return simplify_gen_binary (XOR, mode,
2967 simplify_gen_binary (op, mode, a, c),
2968 gen_int_mode ((bval & xcval) ^ dval,
2969 mode));
2972 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2973 we can transform like this:
2974 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2975 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2976 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2977 Attempt a few simplifications when B and C are both constants. */
2978 if (GET_CODE (op0) == AND
2979 && CONST_INT_P (op1)
2980 && CONST_INT_P (XEXP (op0, 1)))
2982 rtx a = XEXP (op0, 0);
2983 rtx b = XEXP (op0, 1);
2984 rtx c = op1;
2985 HOST_WIDE_INT bval = INTVAL (b);
2986 HOST_WIDE_INT cval = INTVAL (c);
2988 /* Instead of computing ~A&C, we first compute its negation,
2989 (A|~C). If that yields -1, ~A&C is zero, so we can
2990 optimize for sure. If it does not simplify, we still try
2991 to compute ~A&C below, but since that always allocates
2992 RTL, we don't try that before committing to returning a
2993 simplified expression. */
2994 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2995 GEN_INT (~cval));
2997 if ((~cval & bval) == 0)
2999 rtx na_c = NULL_RTX;
3000 if (n_na_c)
3001 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3002 else
3004 /* If ~A does not simplify, don't bother: we don't
3005 want to simplify 2 operations into 3, and if na_c
3006 were to simplify with na, n_na_c would have
3007 simplified as well. */
3008 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3009 if (na)
3010 na_c = simplify_gen_binary (AND, mode, na, c);
3013 /* Try to simplify ~A&C | ~B&C. */
3014 if (na_c != NULL_RTX)
3015 return simplify_gen_binary (IOR, mode, na_c,
3016 gen_int_mode (~bval & cval, mode));
3018 else
3020 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3021 if (n_na_c == CONSTM1_RTX (mode))
3023 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3024 gen_int_mode (~cval & bval,
3025 mode));
3026 return simplify_gen_binary (IOR, mode, a_nc_b,
3027 gen_int_mode (~bval & cval,
3028 mode));
3033 /* If we have (xor (and (xor A B) C) A) with C a constant, we can instead
3034 do (ior (and A ~C) (and B C)), which is a machine instruction on some
3035 machines and also has a shorter instruction path length. */
3036 if (GET_CODE (op0) == AND
3037 && GET_CODE (XEXP (op0, 0)) == XOR
3038 && CONST_INT_P (XEXP (op0, 1))
3039 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3041 rtx a = trueop1;
3042 rtx b = XEXP (XEXP (op0, 0), 1);
3043 rtx c = XEXP (op0, 1);
3044 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3045 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3046 rtx bc = simplify_gen_binary (AND, mode, b, c);
3047 return simplify_gen_binary (IOR, mode, a_nc, bc);
3049 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3050 else if (GET_CODE (op0) == AND
3051 && GET_CODE (XEXP (op0, 0)) == XOR
3052 && CONST_INT_P (XEXP (op0, 1))
3053 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3055 rtx a = XEXP (XEXP (op0, 0), 0);
3056 rtx b = trueop1;
3057 rtx c = XEXP (op0, 1);
3058 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3059 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3060 rtx ac = simplify_gen_binary (AND, mode, a, c);
3061 return simplify_gen_binary (IOR, mode, ac, b_nc);
3064 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3065 comparison if STORE_FLAG_VALUE is 1. */
3066 if (STORE_FLAG_VALUE == 1
3067 && trueop1 == const1_rtx
3068 && COMPARISON_P (op0)
3069 && (reversed = reversed_comparison (op0, mode)))
3070 return reversed;
3072 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3073 is (lt foo (const_int 0)), so we can perform the above
3074 simplification if STORE_FLAG_VALUE is 1. */
3076 if (is_a <scalar_int_mode> (mode, &int_mode)
3077 && STORE_FLAG_VALUE == 1
3078 && trueop1 == const1_rtx
3079 && GET_CODE (op0) == LSHIFTRT
3080 && CONST_INT_P (XEXP (op0, 1))
3081 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3082 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3084 /* (xor (comparison foo bar) (const_int sign-bit))
3085 when STORE_FLAG_VALUE is the sign bit. */
3086 if (is_a <scalar_int_mode> (mode, &int_mode)
3087 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3088 && trueop1 == const_true_rtx
3089 && COMPARISON_P (op0)
3090 && (reversed = reversed_comparison (op0, int_mode)))
3091 return reversed;
3093 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3094 if (tem)
3095 return tem;
3097 tem = simplify_associative_operation (code, mode, op0, op1);
3098 if (tem)
3099 return tem;
3100 break;
3102 case AND:
3103 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3104 return trueop1;
3105 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3106 return op0;
3107 if (HWI_COMPUTABLE_MODE_P (mode))
3109 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3110 HOST_WIDE_INT nzop1;
3111 if (CONST_INT_P (trueop1))
3113 HOST_WIDE_INT val1 = INTVAL (trueop1);
3114 /* If we are turning off bits already known off in OP0, we need
3115 not do an AND. */
3116 if ((nzop0 & ~val1) == 0)
3117 return op0;
3119 nzop1 = nonzero_bits (trueop1, mode);
3120 /* If we are clearing all the nonzero bits, the result is zero. */
3121 if ((nzop1 & nzop0) == 0
3122 && !side_effects_p (op0) && !side_effects_p (op1))
3123 return CONST0_RTX (mode);
3125 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3126 && GET_MODE_CLASS (mode) != MODE_CC)
3127 return op0;
3128 /* A & (~A) -> 0 */
3129 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3130 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3131 && ! side_effects_p (op0)
3132 && GET_MODE_CLASS (mode) != MODE_CC)
3133 return CONST0_RTX (mode);
3135 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3136 there are no nonzero bits of C outside of X's mode. */
3137 if ((GET_CODE (op0) == SIGN_EXTEND
3138 || GET_CODE (op0) == ZERO_EXTEND)
3139 && CONST_INT_P (trueop1)
3140 && HWI_COMPUTABLE_MODE_P (mode)
3141 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3142 & UINTVAL (trueop1)) == 0)
3144 machine_mode imode = GET_MODE (XEXP (op0, 0));
3145 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3146 gen_int_mode (INTVAL (trueop1),
3147 imode));
3148 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3151 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3152 we might be able to further simplify the AND with X and potentially
3153 remove the truncation altogether. */
3154 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3156 rtx x = XEXP (op0, 0);
3157 machine_mode xmode = GET_MODE (x);
3158 tem = simplify_gen_binary (AND, xmode, x,
3159 gen_int_mode (INTVAL (trueop1), xmode));
3160 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3163 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3164 if (GET_CODE (op0) == IOR
3165 && CONST_INT_P (trueop1)
3166 && CONST_INT_P (XEXP (op0, 1)))
3168 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3169 return simplify_gen_binary (IOR, mode,
3170 simplify_gen_binary (AND, mode,
3171 XEXP (op0, 0), op1),
3172 gen_int_mode (tmp, mode));
3175 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3176 insn (and may simplify more). */
3177 if (GET_CODE (op0) == XOR
3178 && rtx_equal_p (XEXP (op0, 0), op1)
3179 && ! side_effects_p (op1))
3180 return simplify_gen_binary (AND, mode,
3181 simplify_gen_unary (NOT, mode,
3182 XEXP (op0, 1), mode),
3183 op1);
3185 if (GET_CODE (op0) == XOR
3186 && rtx_equal_p (XEXP (op0, 1), op1)
3187 && ! side_effects_p (op1))
3188 return simplify_gen_binary (AND, mode,
3189 simplify_gen_unary (NOT, mode,
3190 XEXP (op0, 0), mode),
3191 op1);
3193 /* Similarly for (~(A ^ B)) & A. */
3194 if (GET_CODE (op0) == NOT
3195 && GET_CODE (XEXP (op0, 0)) == XOR
3196 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3197 && ! side_effects_p (op1))
3198 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3200 if (GET_CODE (op0) == NOT
3201 && GET_CODE (XEXP (op0, 0)) == XOR
3202 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3203 && ! side_effects_p (op1))
3204 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3206 /* Convert (A | B) & A to A. */
3207 if (GET_CODE (op0) == IOR
3208 && (rtx_equal_p (XEXP (op0, 0), op1)
3209 || rtx_equal_p (XEXP (op0, 1), op1))
3210 && ! side_effects_p (XEXP (op0, 0))
3211 && ! side_effects_p (XEXP (op0, 1)))
3212 return op1;
3214 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3215 ((A & N) + B) & M -> (A + B) & M
3216 Similarly if (N & M) == 0,
3217 ((A | N) + B) & M -> (A + B) & M
3218 and for - instead of + and/or ^ instead of |.
3219 Also, if (N & M) == 0, then
3220 (A +- N) & M -> A & M. */
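/* For example, with M == 0xff and N == 0x100 we have (N & M) == 0, so
((A | 0x100) + B) & 0xff becomes (A + B) & 0xff: the bits of N lie
above the retained low byte, and carries only propagate upwards, so
they cannot change the masked result. */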
3221 if (CONST_INT_P (trueop1)
3222 && HWI_COMPUTABLE_MODE_P (mode)
3223 && ~UINTVAL (trueop1)
3224 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3225 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3227 rtx pmop[2];
3228 int which;
3230 pmop[0] = XEXP (op0, 0);
3231 pmop[1] = XEXP (op0, 1);
3233 if (CONST_INT_P (pmop[1])
3234 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3235 return simplify_gen_binary (AND, mode, pmop[0], op1);
3237 for (which = 0; which < 2; which++)
3239 tem = pmop[which];
3240 switch (GET_CODE (tem))
3242 case AND:
3243 if (CONST_INT_P (XEXP (tem, 1))
3244 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3245 == UINTVAL (trueop1))
3246 pmop[which] = XEXP (tem, 0);
3247 break;
3248 case IOR:
3249 case XOR:
3250 if (CONST_INT_P (XEXP (tem, 1))
3251 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3252 pmop[which] = XEXP (tem, 0);
3253 break;
3254 default:
3255 break;
3259 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3261 tem = simplify_gen_binary (GET_CODE (op0), mode,
3262 pmop[0], pmop[1]);
3263 return simplify_gen_binary (code, mode, tem, op1);
3267 /* (and X (ior (not X) Y)) -> (and X Y) */
3268 if (GET_CODE (op1) == IOR
3269 && GET_CODE (XEXP (op1, 0)) == NOT
3270 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3271 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3273 /* (and (ior (not X) Y) X) -> (and X Y) */
3274 if (GET_CODE (op0) == IOR
3275 && GET_CODE (XEXP (op0, 0)) == NOT
3276 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3277 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3279 /* (and X (ior Y (not X))) -> (and X Y) */
3280 if (GET_CODE (op1) == IOR
3281 && GET_CODE (XEXP (op1, 1)) == NOT
3282 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3283 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3285 /* (and (ior Y (not X)) X) -> (and X Y) */
3286 if (GET_CODE (op0) == IOR
3287 && GET_CODE (XEXP (op0, 1)) == NOT
3288 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3289 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3291 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3292 if (tem)
3293 return tem;
3295 tem = simplify_associative_operation (code, mode, op0, op1);
3296 if (tem)
3297 return tem;
3298 break;
3300 case UDIV:
3301 /* 0/x is 0 (or x&0 if x has side-effects). */
3302 if (trueop0 == CONST0_RTX (mode)
3303 && !cfun->can_throw_non_call_exceptions)
3305 if (side_effects_p (op1))
3306 return simplify_gen_binary (AND, mode, op1, trueop0);
3307 return trueop0;
3309 /* x/1 is x. */
3310 if (trueop1 == CONST1_RTX (mode))
3312 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3313 if (tem)
3314 return tem;
3316 /* Convert divide by power of two into shift. */
3317 if (CONST_INT_P (trueop1)
3318 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3319 return simplify_gen_binary (LSHIFTRT, mode, op0,
3320 gen_int_shift_amount (mode, val));
3321 break;
3323 case DIV:
3324 /* Handle floating point and integers separately. */
3325 if (SCALAR_FLOAT_MODE_P (mode))
3327 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3328 safe for modes with NaNs, since 0.0 / 0.0 will then be
3329 NaN rather than 0.0. Nor is it safe for modes with signed
3330 zeros, since dividing 0 by a negative number gives -0.0. */
3331 if (trueop0 == CONST0_RTX (mode)
3332 && !HONOR_NANS (mode)
3333 && !HONOR_SIGNED_ZEROS (mode)
3334 && ! side_effects_p (op1))
3335 return op0;
3336 /* x/1.0 is x. */
3337 if (trueop1 == CONST1_RTX (mode)
3338 && !HONOR_SNANS (mode))
3339 return op0;
3341 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3342 && trueop1 != CONST0_RTX (mode))
3344 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3346 /* x/-1.0 is -x. */
3347 if (real_equal (d1, &dconstm1)
3348 && !HONOR_SNANS (mode))
3349 return simplify_gen_unary (NEG, mode, op0, mode);
3351 /* Change FP division by a constant into multiplication.
3352 Only do this with -freciprocal-math. */
3353 if (flag_reciprocal_math
3354 && !real_equal (d1, &dconst0))
3356 REAL_VALUE_TYPE d;
3357 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3358 tem = const_double_from_real_value (d, mode);
3359 return simplify_gen_binary (MULT, mode, op0, tem);
3363 else if (SCALAR_INT_MODE_P (mode))
3365 /* 0/x is 0 (or x&0 if x has side-effects). */
3366 if (trueop0 == CONST0_RTX (mode)
3367 && !cfun->can_throw_non_call_exceptions)
3369 if (side_effects_p (op1))
3370 return simplify_gen_binary (AND, mode, op1, trueop0);
3371 return trueop0;
3373 /* x/1 is x. */
3374 if (trueop1 == CONST1_RTX (mode))
3376 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3377 if (tem)
3378 return tem;
3380 /* x/-1 is -x. */
3381 if (trueop1 == constm1_rtx)
3383 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3384 if (x)
3385 return simplify_gen_unary (NEG, mode, x, mode);
3388 break;
3390 case UMOD:
3391 /* 0%x is 0 (or x&0 if x has side-effects). */
3392 if (trueop0 == CONST0_RTX (mode))
3394 if (side_effects_p (op1))
3395 return simplify_gen_binary (AND, mode, op1, trueop0);
3396 return trueop0;
3398 /* x%1 is 0 (or x&0 if x has side-effects). */
3399 if (trueop1 == CONST1_RTX (mode))
3401 if (side_effects_p (op0))
3402 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3403 return CONST0_RTX (mode);
3405 /* Implement modulus by power of two as AND. */
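/* For example, (umod X (const_int 16)) becomes
(and X (const_int 15)). */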
3406 if (CONST_INT_P (trueop1)
3407 && exact_log2 (UINTVAL (trueop1)) > 0)
3408 return simplify_gen_binary (AND, mode, op0,
3409 gen_int_mode (INTVAL (op1) - 1, mode));
3410 break;
3412 case MOD:
3413 /* 0%x is 0 (or x&0 if x has side-effects). */
3414 if (trueop0 == CONST0_RTX (mode))
3416 if (side_effects_p (op1))
3417 return simplify_gen_binary (AND, mode, op1, trueop0);
3418 return trueop0;
3420 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3421 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3423 if (side_effects_p (op0))
3424 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3425 return CONST0_RTX (mode);
3427 break;
3429 case ROTATERT:
3430 case ROTATE:
3431 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3432 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3433 bitsize - 1, use the other rotate direction with an amount of
3434 1 .. bitsize / 2 - 1 instead. */
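/* For example, on a target with both rotate directions, an SImode
(rotate A (const_int 25)) becomes (rotatert A (const_int 7)). */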
3435 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3436 if (CONST_INT_P (trueop1)
3437 && IN_RANGE (INTVAL (trueop1),
3438 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3439 GET_MODE_UNIT_PRECISION (mode) - 1))
3441 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3442 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3443 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3444 mode, op0, new_amount_rtx);
3446 #endif
3447 /* FALLTHRU */
3448 case ASHIFTRT:
3449 if (trueop1 == CONST0_RTX (mode))
3450 return op0;
3451 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3452 return op0;
3453 /* Rotating ~0 always results in ~0. */
3454 if (CONST_INT_P (trueop0)
3455 && HWI_COMPUTABLE_MODE_P (mode)
3456 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3457 && ! side_effects_p (op1))
3458 return op0;
3460 canonicalize_shift:
3461 /* Given:
3462 scalar modes M1, M2
3463 scalar constants c1, c2
3464 size (M2) > size (M1)
3465 c1 == size (M2) - size (M1)
3466 optimize:
3467 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3468 <low_part>)
3469 (const_int <c2>))
3470 to:
3471 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3472 <low_part>). */
3473 if ((code == ASHIFTRT || code == LSHIFTRT)
3474 && is_a <scalar_int_mode> (mode, &int_mode)
3475 && SUBREG_P (op0)
3476 && CONST_INT_P (op1)
3477 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3478 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3479 &inner_mode)
3480 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3481 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3482 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3483 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3484 && subreg_lowpart_p (op0))
3486 rtx tmp = gen_int_shift_amount
3487 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3488 tmp = simplify_gen_binary (code, inner_mode,
3489 XEXP (SUBREG_REG (op0), 0),
3490 tmp);
3491 return lowpart_subreg (int_mode, tmp, inner_mode);
3494 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3496 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3497 if (val != INTVAL (op1))
3498 return simplify_gen_binary (code, mode, op0,
3499 gen_int_shift_amount (mode, val));
3501 break;
3503 case ASHIFT:
3504 case SS_ASHIFT:
3505 case US_ASHIFT:
3506 if (trueop1 == CONST0_RTX (mode))
3507 return op0;
3508 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3509 return op0;
3510 goto canonicalize_shift;
3512 case LSHIFTRT:
3513 if (trueop1 == CONST0_RTX (mode))
3514 return op0;
3515 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3516 return op0;
3517 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
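/* CLZ of a nonzero value is at most the precision minus 1, while CLZ
of zero (when defined) is the precision itself, a power of two, so
shifting right by log2 (precision) yields 1 exactly when X is zero,
i.e. (eq X 0) given STORE_FLAG_VALUE == 1. */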
3518 if (GET_CODE (op0) == CLZ
3519 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3520 && CONST_INT_P (trueop1)
3521 && STORE_FLAG_VALUE == 1
3522 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3524 unsigned HOST_WIDE_INT zero_val = 0;
3526 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3527 && zero_val == GET_MODE_PRECISION (inner_mode)
3528 && INTVAL (trueop1) == exact_log2 (zero_val))
3529 return simplify_gen_relational (EQ, mode, inner_mode,
3530 XEXP (op0, 0), const0_rtx);
3532 goto canonicalize_shift;
3534 case SMIN:
3535 if (HWI_COMPUTABLE_MODE_P (mode)
3536 && mode_signbit_p (mode, trueop1)
3537 && ! side_effects_p (op0))
3538 return op1;
3539 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3540 return op0;
3541 tem = simplify_associative_operation (code, mode, op0, op1);
3542 if (tem)
3543 return tem;
3544 break;
3546 case SMAX:
3547 if (HWI_COMPUTABLE_MODE_P (mode)
3548 && CONST_INT_P (trueop1)
3549 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3550 && ! side_effects_p (op0))
3551 return op1;
3552 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3553 return op0;
3554 tem = simplify_associative_operation (code, mode, op0, op1);
3555 if (tem)
3556 return tem;
3557 break;
3559 case UMIN:
3560 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3561 return op1;
3562 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3563 return op0;
3564 tem = simplify_associative_operation (code, mode, op0, op1);
3565 if (tem)
3566 return tem;
3567 break;
3569 case UMAX:
3570 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3571 return op1;
3572 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3573 return op0;
3574 tem = simplify_associative_operation (code, mode, op0, op1);
3575 if (tem)
3576 return tem;
3577 break;
3579 case SS_PLUS:
3580 case US_PLUS:
3581 case SS_MINUS:
3582 case US_MINUS:
3583 case SS_MULT:
3584 case US_MULT:
3585 case SS_DIV:
3586 case US_DIV:
3587 /* ??? There are simplifications that can be done. */
3588 return 0;
3590 case VEC_SERIES:
3591 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3592 return gen_vec_duplicate (mode, op0);
3593 if (valid_for_const_vector_p (mode, op0)
3594 && valid_for_const_vector_p (mode, op1))
3595 return gen_const_vec_series (mode, op0, op1);
3596 return 0;
3598 case VEC_SELECT:
3599 if (!VECTOR_MODE_P (mode))
3601 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3602 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3603 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3604 gcc_assert (XVECLEN (trueop1, 0) == 1);
3605 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3607 if (vec_duplicate_p (trueop0, &elt0))
3608 return elt0;
3610 if (GET_CODE (trueop0) == CONST_VECTOR)
3611 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3612 (trueop1, 0, 0)));
3614 /* Extract a scalar element from a nested VEC_SELECT expression
3615 (with an optional nested VEC_CONCAT expression). Some targets
3616 (i386) extract a scalar element from a vector using a chain of
3617 nested VEC_SELECT expressions. When the input operand is a memory
3618 operand, this operation can be simplified to a simple scalar
3619 load from an offset memory address. */
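/* A hypothetical example: (vec_select:SI
     (vec_select:V2SI (reg:V4SI X) (parallel [2 3]))
     (parallel [1]))
   picks element 1 of the inner selection, i.e. element 3 of X, and is
   rewritten below as (vec_select:SI (reg:V4SI X) (parallel [3])). */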
3620 if (GET_CODE (trueop0) == VEC_SELECT)
3622 rtx op0 = XEXP (trueop0, 0);
3623 rtx op1 = XEXP (trueop0, 1);
3625 int n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3627 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3628 int elem;
3630 rtvec vec;
3631 rtx tmp_op, tmp;
3633 gcc_assert (GET_CODE (op1) == PARALLEL);
3634 gcc_assert (i < n_elts);
3636 /* Select the element pointed to by the nested selector. */
3637 elem = INTVAL (XVECEXP (op1, 0, i));
3639 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3640 if (GET_CODE (op0) == VEC_CONCAT)
3642 rtx op00 = XEXP (op0, 0);
3643 rtx op01 = XEXP (op0, 1);
3645 machine_mode mode00, mode01;
3646 int n_elts00, n_elts01;
3648 mode00 = GET_MODE (op00);
3649 mode01 = GET_MODE (op01);
3651 /* Find the number of elements in each operand. */
3652 n_elts00 = GET_MODE_NUNITS (mode00);
3653 n_elts01 = GET_MODE_NUNITS (mode01);
3655 gcc_assert (n_elts == n_elts00 + n_elts01);
3657 /* Select correct operand of VEC_CONCAT
3658 and adjust selector. */
3659 if (elem < n_elts01)
3660 tmp_op = op00;
3661 else
3663 tmp_op = op01;
3664 elem -= n_elts00;
3667 else
3668 tmp_op = op0;
3670 vec = rtvec_alloc (1);
3671 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3673 tmp = gen_rtx_fmt_ee (code, mode,
3674 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3675 return tmp;
3678 else
3680 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3681 gcc_assert (GET_MODE_INNER (mode)
3682 == GET_MODE_INNER (GET_MODE (trueop0)));
3683 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3685 if (vec_duplicate_p (trueop0, &elt0))
3686 /* It doesn't matter which elements are selected by trueop1,
3687 because they are all the same. */
3688 return gen_vec_duplicate (mode, elt0);
3690 if (GET_CODE (trueop0) == CONST_VECTOR)
3692 int elt_size = GET_MODE_UNIT_SIZE (mode);
3693 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3694 rtvec v = rtvec_alloc (n_elts);
3695 unsigned int i;
3697 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3698 for (i = 0; i < n_elts; i++)
3700 rtx x = XVECEXP (trueop1, 0, i);
3702 gcc_assert (CONST_INT_P (x));
3703 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3704 INTVAL (x));
3707 return gen_rtx_CONST_VECTOR (mode, v);
3710 /* Recognize the identity. */
3711 if (GET_MODE (trueop0) == mode)
3713 bool maybe_ident = true;
3714 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3716 rtx j = XVECEXP (trueop1, 0, i);
3717 if (!CONST_INT_P (j) || INTVAL (j) != i)
3719 maybe_ident = false;
3720 break;
3723 if (maybe_ident)
3724 return trueop0;
3727 /* If we build {a,b} then permute it, build the result directly. */
3728 if (XVECLEN (trueop1, 0) == 2
3729 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3730 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3731 && GET_CODE (trueop0) == VEC_CONCAT
3732 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3733 && GET_MODE (XEXP (trueop0, 0)) == mode
3734 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3735 && GET_MODE (XEXP (trueop0, 1)) == mode)
3737 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3738 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3739 rtx subop0, subop1;
3741 gcc_assert (i0 < 4 && i1 < 4);
3742 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3743 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3745 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3748 if (XVECLEN (trueop1, 0) == 2
3749 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3750 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3751 && GET_CODE (trueop0) == VEC_CONCAT
3752 && GET_MODE (trueop0) == mode)
3754 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3755 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3756 rtx subop0, subop1;
3758 gcc_assert (i0 < 2 && i1 < 2);
3759 subop0 = XEXP (trueop0, i0);
3760 subop1 = XEXP (trueop0, i1);
3762 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3765 /* If we select one half of a vec_concat, return that. */
3766 if (GET_CODE (trueop0) == VEC_CONCAT
3767 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3769 rtx subop0 = XEXP (trueop0, 0);
3770 rtx subop1 = XEXP (trueop0, 1);
3771 machine_mode mode0 = GET_MODE (subop0);
3772 machine_mode mode1 = GET_MODE (subop1);
3773 int l0 = GET_MODE_NUNITS (mode0);
3774 int l1 = GET_MODE_NUNITS (mode1);
3775 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3776 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3778 bool success = true;
3779 for (int i = 1; i < l0; ++i)
3781 rtx j = XVECEXP (trueop1, 0, i);
3782 if (!CONST_INT_P (j) || INTVAL (j) != i)
3784 success = false;
3785 break;
3788 if (success)
3789 return subop0;
3791 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3793 bool success = true;
3794 for (int i = 1; i < l1; ++i)
3796 rtx j = XVECEXP (trueop1, 0, i);
3797 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3799 success = false;
3800 break;
3803 if (success)
3804 return subop1;
3809 if (XVECLEN (trueop1, 0) == 1
3810 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3811 && GET_CODE (trueop0) == VEC_CONCAT)
3813 rtx vec = trueop0;
3814 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3816 /* Try to find the element in the VEC_CONCAT. */
3817 while (GET_MODE (vec) != mode
3818 && GET_CODE (vec) == VEC_CONCAT)
3820 HOST_WIDE_INT vec_size;
3822 if (CONST_INT_P (XEXP (vec, 0)))
3824 /* vec_concat of two const_ints doesn't make sense with
3825 respect to modes. */
3826 if (CONST_INT_P (XEXP (vec, 1)))
3827 return 0;
3829 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3830 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3832 else
3833 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3835 if (offset < vec_size)
3836 vec = XEXP (vec, 0);
3837 else
3839 offset -= vec_size;
3840 vec = XEXP (vec, 1);
3842 vec = avoid_constant_pool_reference (vec);
3845 if (GET_MODE (vec) == mode)
3846 return vec;
3849 /* If we select elements in a vec_merge that all come from the same
3850 operand, select from that operand directly. */
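/* For example, with selector (const_int 5) (binary 101) the elements at
   indices 0 and 2 of the VEC_MERGE all come from its first operand, so
   (vec_select (vec_merge A B (const_int 5)) (parallel [0 2]))
   can become (vec_select A (parallel [0 2])). */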
3851 if (GET_CODE (op0) == VEC_MERGE)
3853 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3854 if (CONST_INT_P (trueop02))
3856 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3857 bool all_operand0 = true;
3858 bool all_operand1 = true;
3859 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3861 rtx j = XVECEXP (trueop1, 0, i);
3862 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3863 all_operand1 = false;
3864 else
3865 all_operand0 = false;
3867 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3868 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3869 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3870 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3874 /* If we have two nested selects that are inverses of each
3875 other, replace them with the source operand. */
3876 if (GET_CODE (trueop0) == VEC_SELECT
3877 && GET_MODE (XEXP (trueop0, 0)) == mode)
3879 rtx op0_subop1 = XEXP (trueop0, 1);
3880 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3881 gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));
3883 /* Apply the outer ordering vector to the inner one. (The inner
3884 ordering vector is expressly permitted to be of a different
3885 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3886 then the two VEC_SELECTs cancel. */
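/* For example, if both the inner and the outer selector are
   (parallel [1 0]), applying the outer to the inner yields { 0, 1 },
   so the nested selects cancel and the source vector is returned. */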
3887 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3889 rtx x = XVECEXP (trueop1, 0, i);
3890 if (!CONST_INT_P (x))
3891 return 0;
3892 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3893 if (!CONST_INT_P (y) || i != INTVAL (y))
3894 return 0;
3896 return XEXP (trueop0, 0);
3899 return 0;
3900 case VEC_CONCAT:
3902 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3903 ? GET_MODE (trueop0)
3904 : GET_MODE_INNER (mode));
3905 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3906 ? GET_MODE (trueop1)
3907 : GET_MODE_INNER (mode));
3909 gcc_assert (VECTOR_MODE_P (mode));
3910 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3911 == GET_MODE_SIZE (mode));
3913 if (VECTOR_MODE_P (op0_mode))
3914 gcc_assert (GET_MODE_INNER (mode)
3915 == GET_MODE_INNER (op0_mode));
3916 else
3917 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3919 if (VECTOR_MODE_P (op1_mode))
3920 gcc_assert (GET_MODE_INNER (mode)
3921 == GET_MODE_INNER (op1_mode));
3922 else
3923 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3925 if ((GET_CODE (trueop0) == CONST_VECTOR
3926 || CONST_SCALAR_INT_P (trueop0)
3927 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3928 && (GET_CODE (trueop1) == CONST_VECTOR
3929 || CONST_SCALAR_INT_P (trueop1)
3930 || CONST_DOUBLE_AS_FLOAT_P (trueop1)))
3932 unsigned n_elts = GET_MODE_NUNITS (mode);
3933 unsigned in_n_elts = GET_MODE_NUNITS (op0_mode);
3934 rtvec v = rtvec_alloc (n_elts);
3935 unsigned int i;
3936 for (i = 0; i < n_elts; i++)
3938 if (i < in_n_elts)
3940 if (!VECTOR_MODE_P (op0_mode))
3941 RTVEC_ELT (v, i) = trueop0;
3942 else
3943 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3945 else
3947 if (!VECTOR_MODE_P (op1_mode))
3948 RTVEC_ELT (v, i) = trueop1;
3949 else
3950 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3951 i - in_n_elts);
3955 return gen_rtx_CONST_VECTOR (mode, v);
3958 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3959 Restrict the transformation to avoid generating a VEC_SELECT with a
3960 mode unrelated to its operand. */
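/* For example, (vec_concat:V2DF
     (vec_select:DF (reg:V2DF X) (parallel [1]))
     (vec_select:DF (reg:V2DF X) (parallel [0])))
   becomes (vec_select:V2DF (reg:V2DF X) (parallel [1 0])). */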
3961 if (GET_CODE (trueop0) == VEC_SELECT
3962 && GET_CODE (trueop1) == VEC_SELECT
3963 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3964 && GET_MODE (XEXP (trueop0, 0)) == mode)
3966 rtx par0 = XEXP (trueop0, 1);
3967 rtx par1 = XEXP (trueop1, 1);
3968 int len0 = XVECLEN (par0, 0);
3969 int len1 = XVECLEN (par1, 0);
3970 rtvec vec = rtvec_alloc (len0 + len1);
3971 for (int i = 0; i < len0; i++)
3972 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3973 for (int i = 0; i < len1; i++)
3974 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3975 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3976 gen_rtx_PARALLEL (VOIDmode, vec));
3979 return 0;
3981 default:
3982 gcc_unreachable ();
3985 if (mode == GET_MODE (op0)
3986 && mode == GET_MODE (op1)
3987 && vec_duplicate_p (op0, &elt0)
3988 && vec_duplicate_p (op1, &elt1))
3990 /* Try applying the operator to ELT and see if that simplifies.
3991 We can duplicate the result if so.
3993 The reason we don't use simplify_gen_binary is that it isn't
3994 necessarily a win to convert things like:
3996 (plus:V (vec_duplicate:V (reg:S R1))
3997 (vec_duplicate:V (reg:S R2)))
3999 to:
4001 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4003 The first might be done entirely in vector registers while the
4004 second might need a move between register files. */
4005 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4006 elt0, elt1);
4007 if (tem)
4008 return gen_vec_duplicate (mode, tem);
4011 return 0;
4015 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4016 rtx op0, rtx op1)
4018 if (VECTOR_MODE_P (mode)
4019 && code != VEC_CONCAT
4020 && GET_CODE (op0) == CONST_VECTOR
4021 && GET_CODE (op1) == CONST_VECTOR)
4023 unsigned int n_elts = CONST_VECTOR_NUNITS (op0);
4024 gcc_assert (n_elts == (unsigned int) CONST_VECTOR_NUNITS (op1));
4025 gcc_assert (n_elts == GET_MODE_NUNITS (mode));
4026 rtvec v = rtvec_alloc (n_elts);
4027 unsigned int i;
4029 for (i = 0; i < n_elts; i++)
4031 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4032 CONST_VECTOR_ELT (op0, i),
4033 CONST_VECTOR_ELT (op1, i));
4034 if (!x || !valid_for_const_vector_p (mode, x))
4035 return 0;
4036 RTVEC_ELT (v, i) = x;
4039 return gen_rtx_CONST_VECTOR (mode, v);
4042 if (VECTOR_MODE_P (mode)
4043 && code == VEC_CONCAT
4044 && (CONST_SCALAR_INT_P (op0)
4045 || CONST_FIXED_P (op0)
4046 || CONST_DOUBLE_AS_FLOAT_P (op0))
4047 && (CONST_SCALAR_INT_P (op1)
4048 || CONST_DOUBLE_AS_FLOAT_P (op1)
4049 || CONST_FIXED_P (op1)))
4051 unsigned n_elts = GET_MODE_NUNITS (mode);
4052 rtvec v = rtvec_alloc (n_elts);
4054 gcc_assert (n_elts >= 2);
4055 if (n_elts == 2)
4057 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4058 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4060 RTVEC_ELT (v, 0) = op0;
4061 RTVEC_ELT (v, 1) = op1;
4063 else
4065 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
4066 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
4067 unsigned i;
4069 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4070 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4071 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4073 for (i = 0; i < op0_n_elts; ++i)
4074 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4075 for (i = 0; i < op1_n_elts; ++i)
4076 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4079 return gen_rtx_CONST_VECTOR (mode, v);
4082 if (SCALAR_FLOAT_MODE_P (mode)
4083 && CONST_DOUBLE_AS_FLOAT_P (op0)
4084 && CONST_DOUBLE_AS_FLOAT_P (op1)
4085 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4087 if (code == AND
4088 || code == IOR
4089 || code == XOR)
4091 long tmp0[4];
4092 long tmp1[4];
4093 REAL_VALUE_TYPE r;
4094 int i;
4096 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4097 GET_MODE (op0));
4098 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4099 GET_MODE (op1));
4100 for (i = 0; i < 4; i++)
4102 switch (code)
4104 case AND:
4105 tmp0[i] &= tmp1[i];
4106 break;
4107 case IOR:
4108 tmp0[i] |= tmp1[i];
4109 break;
4110 case XOR:
4111 tmp0[i] ^= tmp1[i];
4112 break;
4113 default:
4114 gcc_unreachable ();
4117 real_from_target (&r, tmp0, mode);
4118 return const_double_from_real_value (r, mode);
4120 else
4122 REAL_VALUE_TYPE f0, f1, value, result;
4123 const REAL_VALUE_TYPE *opr0, *opr1;
4124 bool inexact;
4126 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4127 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4129 if (HONOR_SNANS (mode)
4130 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4131 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4132 return 0;
4134 real_convert (&f0, mode, opr0);
4135 real_convert (&f1, mode, opr1);
4137 if (code == DIV
4138 && real_equal (&f1, &dconst0)
4139 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4140 return 0;
4142 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4143 && flag_trapping_math
4144 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4146 int s0 = REAL_VALUE_NEGATIVE (f0);
4147 int s1 = REAL_VALUE_NEGATIVE (f1);
4149 switch (code)
4151 case PLUS:
4152 /* Inf + -Inf = NaN plus exception. */
4153 if (s0 != s1)
4154 return 0;
4155 break;
4156 case MINUS:
4157 /* Inf - Inf = NaN plus exception. */
4158 if (s0 == s1)
4159 return 0;
4160 break;
4161 case DIV:
4162 /* Inf / Inf = NaN plus exception. */
4163 return 0;
4164 default:
4165 break;
4169 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4170 && flag_trapping_math
4171 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4172 || (REAL_VALUE_ISINF (f1)
4173 && real_equal (&f0, &dconst0))))
4174 /* Inf * 0 = NaN plus exception. */
4175 return 0;
4177 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4178 &f0, &f1);
4179 real_convert (&result, mode, &value);
4181 /* Don't constant fold this floating point operation if
4182 the result has overflowed and flag_trapping_math is set. */
4184 if (flag_trapping_math
4185 && MODE_HAS_INFINITIES (mode)
4186 && REAL_VALUE_ISINF (result)
4187 && !REAL_VALUE_ISINF (f0)
4188 && !REAL_VALUE_ISINF (f1))
4189 /* Overflow plus exception. */
4190 return 0;
4192 /* Don't constant fold this floating point operation if the
4193 result may depend upon the run-time rounding mode and
4194 flag_rounding_math is set, or if GCC's software emulation
4195 is unable to accurately represent the result. */
4197 if ((flag_rounding_math
4198 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4199 && (inexact || !real_identical (&result, &value)))
4200 return NULL_RTX;
4202 return const_double_from_real_value (result, mode);
4206 /* We can fold some multi-word operations. */
4207 scalar_int_mode int_mode;
4208 if (is_a <scalar_int_mode> (mode, &int_mode)
4209 && CONST_SCALAR_INT_P (op0)
4210 && CONST_SCALAR_INT_P (op1))
4212 wide_int result;
4213 bool overflow;
4214 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4215 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4217 #if TARGET_SUPPORTS_WIDE_INT == 0
4218 /* This assert keeps the simplification from producing a result
4219 that cannot be represented in a CONST_DOUBLE, but a lot of
4220 upstream callers expect that this function never fails to
4221 simplify something, so if you added this to the test
4222 above, the code would die later anyway. If this assert
4223 triggers, you just need to make the port support wide int. */
4224 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4225 #endif
4226 switch (code)
4228 case MINUS:
4229 result = wi::sub (pop0, pop1);
4230 break;
4232 case PLUS:
4233 result = wi::add (pop0, pop1);
4234 break;
4236 case MULT:
4237 result = wi::mul (pop0, pop1);
4238 break;
4240 case DIV:
4241 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4242 if (overflow)
4243 return NULL_RTX;
4244 break;
4246 case MOD:
4247 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4248 if (overflow)
4249 return NULL_RTX;
4250 break;
4252 case UDIV:
4253 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4254 if (overflow)
4255 return NULL_RTX;
4256 break;
4258 case UMOD:
4259 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4260 if (overflow)
4261 return NULL_RTX;
4262 break;
4264 case AND:
4265 result = wi::bit_and (pop0, pop1);
4266 break;
4268 case IOR:
4269 result = wi::bit_or (pop0, pop1);
4270 break;
4272 case XOR:
4273 result = wi::bit_xor (pop0, pop1);
4274 break;
4276 case SMIN:
4277 result = wi::smin (pop0, pop1);
4278 break;
4280 case SMAX:
4281 result = wi::smax (pop0, pop1);
4282 break;
4284 case UMIN:
4285 result = wi::umin (pop0, pop1);
4286 break;
4288 case UMAX:
4289 result = wi::umax (pop0, pop1);
4290 break;
4292 case LSHIFTRT:
4293 case ASHIFTRT:
4294 case ASHIFT:
4296 wide_int wop1 = pop1;
4297 if (SHIFT_COUNT_TRUNCATED)
4298 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4299 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4300 return NULL_RTX;
4302 switch (code)
4304 case LSHIFTRT:
4305 result = wi::lrshift (pop0, wop1);
4306 break;
4308 case ASHIFTRT:
4309 result = wi::arshift (pop0, wop1);
4310 break;
4312 case ASHIFT:
4313 result = wi::lshift (pop0, wop1);
4314 break;
4316 default:
4317 gcc_unreachable ();
4319 break;
4321 case ROTATE:
4322 case ROTATERT:
4324 if (wi::neg_p (pop1))
4325 return NULL_RTX;
4327 switch (code)
4329 case ROTATE:
4330 result = wi::lrotate (pop0, pop1);
4331 break;
4333 case ROTATERT:
4334 result = wi::rrotate (pop0, pop1);
4335 break;
4337 default:
4338 gcc_unreachable ();
4340 break;
4342 default:
4343 return NULL_RTX;
4345 return immed_wide_int_const (result, int_mode);
4348 /* Handle polynomial integers. */
4349 if (NUM_POLY_INT_COEFFS > 1
4350 && is_a <scalar_int_mode> (mode, &int_mode)
4351 && poly_int_rtx_p (op0)
4352 && poly_int_rtx_p (op1))
4354 poly_wide_int result;
4355 switch (code)
4357 case PLUS:
4358 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4359 break;
4361 case MINUS:
4362 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4363 break;
4365 case MULT:
4366 if (CONST_SCALAR_INT_P (op1))
4367 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4368 else
4369 return NULL_RTX;
4370 break;
4372 case ASHIFT:
4373 if (CONST_SCALAR_INT_P (op1))
4375 wide_int shift = rtx_mode_t (op1, mode);
4376 if (SHIFT_COUNT_TRUNCATED)
4377 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4378 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4379 return NULL_RTX;
4380 result = wi::to_poly_wide (op0, mode) << shift;
4382 else
4383 return NULL_RTX;
4384 break;
4386 case IOR:
4387 if (!CONST_SCALAR_INT_P (op1)
4388 || !can_ior_p (wi::to_poly_wide (op0, mode),
4389 rtx_mode_t (op1, mode), &result))
4390 return NULL_RTX;
4391 break;
4393 default:
4394 return NULL_RTX;
4396 return immed_wide_int_const (result, int_mode);
4399 return NULL_RTX;
4404 /* Return a positive integer if X should sort after Y. The value
4405 returned is 1 if and only if X and Y are both regs. */
4407 static int
4408 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4410 int result;
4412 result = (commutative_operand_precedence (y)
4413 - commutative_operand_precedence (x));
4414 if (result)
4415 return result + result;
4417 /* Group together equal REGs to do more simplification. */
4418 if (REG_P (x) && REG_P (y))
4419 return REGNO (x) > REGNO (y);
4421 return 0;
4424 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4425 operands may be another PLUS or MINUS.
4427 Rather than test for specific cases, we do this by a brute-force method
4428 and do all possible simplifications until no more changes occur. Then
4429 we rebuild the operation.
4431 May return NULL_RTX when no changes were made. */
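/* As an illustrative sketch: (minus:SI (plus:SI (reg A) (reg B)) (reg B))
   is expanded into the terms { +A, +B, -B }; the +B and -B terms combine
   to zero and the whole expression collapses to (reg A). */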
4433 static rtx
4434 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4435 rtx op1)
4437 struct simplify_plus_minus_op_data
4439 rtx op;
4440 short neg;
4441 } ops[16];
4442 rtx result, tem;
4443 int n_ops = 2;
4444 int changed, n_constants, canonicalized = 0;
4445 int i, j;
4447 memset (ops, 0, sizeof ops);
4449 /* Set up the two operands and then expand them until nothing has been
4450 changed. If we run out of room in our array, give up; this should
4451 almost never happen. */
4453 ops[0].op = op0;
4454 ops[0].neg = 0;
4455 ops[1].op = op1;
4456 ops[1].neg = (code == MINUS);
4460 changed = 0;
4461 n_constants = 0;
4463 for (i = 0; i < n_ops; i++)
4465 rtx this_op = ops[i].op;
4466 int this_neg = ops[i].neg;
4467 enum rtx_code this_code = GET_CODE (this_op);
4469 switch (this_code)
4471 case PLUS:
4472 case MINUS:
4473 if (n_ops == ARRAY_SIZE (ops))
4474 return NULL_RTX;
4476 ops[n_ops].op = XEXP (this_op, 1);
4477 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4478 n_ops++;
4480 ops[i].op = XEXP (this_op, 0);
4481 changed = 1;
4482 /* If this operand was negated then we will potentially
4483 canonicalize the expression. Similarly, if we don't
4484 place the operands adjacently, we're re-ordering the
4485 expression and thus might be performing a
4486 canonicalization. Ignore register re-ordering.
4487 ??? It might be better to shuffle the ops array here,
4488 but then (plus (plus (A, B), plus (C, D))) wouldn't
4489 be seen as non-canonical. */
4490 if (this_neg
4491 || (i != n_ops - 2
4492 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4493 canonicalized = 1;
4494 break;
4496 case NEG:
4497 ops[i].op = XEXP (this_op, 0);
4498 ops[i].neg = ! this_neg;
4499 changed = 1;
4500 canonicalized = 1;
4501 break;
4503 case CONST:
4504 if (n_ops != ARRAY_SIZE (ops)
4505 && GET_CODE (XEXP (this_op, 0)) == PLUS
4506 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4507 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4509 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4510 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4511 ops[n_ops].neg = this_neg;
4512 n_ops++;
4513 changed = 1;
4514 canonicalized = 1;
4516 break;
4518 case NOT:
4519 /* ~a -> (-a - 1) */
4520 if (n_ops != ARRAY_SIZE (ops))
4522 ops[n_ops].op = CONSTM1_RTX (mode);
4523 ops[n_ops++].neg = this_neg;
4524 ops[i].op = XEXP (this_op, 0);
4525 ops[i].neg = !this_neg;
4526 changed = 1;
4527 canonicalized = 1;
4529 break;
4531 case CONST_INT:
4532 n_constants++;
4533 if (this_neg)
4535 ops[i].op = neg_const_int (mode, this_op);
4536 ops[i].neg = 0;
4537 changed = 1;
4538 canonicalized = 1;
4540 break;
4542 default:
4543 break;
4547 while (changed);
4549 if (n_constants > 1)
4550 canonicalized = 1;
4552 gcc_assert (n_ops >= 2);
4554 /* If we only have two operands, we can avoid the loops. */
4555 if (n_ops == 2)
4557 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4558 rtx lhs, rhs;
4560 /* Get the two operands. Be careful with the order, especially for
4561 the cases where code == MINUS. */
4562 if (ops[0].neg && ops[1].neg)
4564 lhs = gen_rtx_NEG (mode, ops[0].op);
4565 rhs = ops[1].op;
4567 else if (ops[0].neg)
4569 lhs = ops[1].op;
4570 rhs = ops[0].op;
4572 else
4574 lhs = ops[0].op;
4575 rhs = ops[1].op;
4578 return simplify_const_binary_operation (code, mode, lhs, rhs);
4581 /* Now simplify each pair of operands until nothing changes. */
4582 while (1)
4584 /* Insertion sort is good enough for a small array. */
4585 for (i = 1; i < n_ops; i++)
4587 struct simplify_plus_minus_op_data save;
4588 int cmp;
4590 j = i - 1;
4591 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4592 if (cmp <= 0)
4593 continue;
4594 /* Just swapping registers doesn't count as canonicalization. */
4595 if (cmp != 1)
4596 canonicalized = 1;
4598 save = ops[i];
4600 ops[j + 1] = ops[j];
4601 while (j--
4602 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4603 ops[j + 1] = save;
4606 changed = 0;
4607 for (i = n_ops - 1; i > 0; i--)
4608 for (j = i - 1; j >= 0; j--)
4610 rtx lhs = ops[j].op, rhs = ops[i].op;
4611 int lneg = ops[j].neg, rneg = ops[i].neg;
4613 if (lhs != 0 && rhs != 0)
4615 enum rtx_code ncode = PLUS;
4617 if (lneg != rneg)
4619 ncode = MINUS;
4620 if (lneg)
4621 std::swap (lhs, rhs);
4623 else if (swap_commutative_operands_p (lhs, rhs))
4624 std::swap (lhs, rhs);
4626 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4627 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4629 rtx tem_lhs, tem_rhs;
4631 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4632 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4633 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4634 tem_rhs);
4636 if (tem && !CONSTANT_P (tem))
4637 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4639 else
4640 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4642 if (tem)
4644 /* Reject "simplifications" that just wrap the two
4645 arguments in a CONST. Failure to do so can result
4646 in infinite recursion with simplify_binary_operation
4647 when it calls us to simplify CONST operations.
4648 Also, if we find such a simplification, don't try
4649 any more combinations with this rhs: We must have
4650 something like symbol+offset, i.e. one of the
4651 trivial CONST expressions we handle later. */
4652 if (GET_CODE (tem) == CONST
4653 && GET_CODE (XEXP (tem, 0)) == ncode
4654 && XEXP (XEXP (tem, 0), 0) == lhs
4655 && XEXP (XEXP (tem, 0), 1) == rhs)
4656 break;
4657 lneg &= rneg;
4658 if (GET_CODE (tem) == NEG)
4659 tem = XEXP (tem, 0), lneg = !lneg;
4660 if (CONST_INT_P (tem) && lneg)
4661 tem = neg_const_int (mode, tem), lneg = 0;
4663 ops[i].op = tem;
4664 ops[i].neg = lneg;
4665 ops[j].op = NULL_RTX;
4666 changed = 1;
4667 canonicalized = 1;
4672 if (!changed)
4673 break;
4675 /* Pack all the operands to the lower-numbered entries. */
4676 for (i = 0, j = 0; j < n_ops; j++)
4677 if (ops[j].op)
4679 ops[i] = ops[j];
4680 i++;
4682 n_ops = i;
4685 /* If nothing changed, check whether rematerialization of the rtl instructions
4686 is still required. */
4687 if (!canonicalized)
4689 /* Perform rematerialization only if all operands are registers and
4690 all operations are PLUS. */
4691 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4692 around rs6000 and how it uses the CA register. See PR67145. */
4693 for (i = 0; i < n_ops; i++)
4694 if (ops[i].neg
4695 || !REG_P (ops[i].op)
4696 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4697 && fixed_regs[REGNO (ops[i].op)]
4698 && !global_regs[REGNO (ops[i].op)]
4699 && ops[i].op != frame_pointer_rtx
4700 && ops[i].op != arg_pointer_rtx
4701 && ops[i].op != stack_pointer_rtx))
4702 return NULL_RTX;
4703 goto gen_result;
4706 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4707 if (n_ops == 2
4708 && CONST_INT_P (ops[1].op)
4709 && CONSTANT_P (ops[0].op)
4710 && ops[0].neg)
4711 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4713 /* We suppressed creation of trivial CONST expressions in the
4714 combination loop to avoid recursion. Create one manually now.
4715 The combination loop should have ensured that there is exactly
4716 one CONST_INT, and the sort will have ensured that it is last
4717 in the array and that any other constant will be next-to-last. */
4719 if (n_ops > 1
4720 && CONST_INT_P (ops[n_ops - 1].op)
4721 && CONSTANT_P (ops[n_ops - 2].op))
4723 rtx value = ops[n_ops - 1].op;
4724 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4725 value = neg_const_int (mode, value);
4726 if (CONST_INT_P (value))
4728 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4729 INTVAL (value));
4730 n_ops--;
4734 /* Put a non-negated operand first, if possible. */
4736 for (i = 0; i < n_ops && ops[i].neg; i++)
4737 continue;
4738 if (i == n_ops)
4739 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4740 else if (i != 0)
4742 tem = ops[0].op;
4743 ops[0] = ops[i];
4744 ops[i].op = tem;
4745 ops[i].neg = 1;
4748 /* Now make the result by performing the requested operations. */
4749 gen_result:
4750 result = ops[0].op;
4751 for (i = 1; i < n_ops; i++)
4752 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4753 mode, result, ops[i].op);
4755 return result;
4758 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4759 static bool
4760 plus_minus_operand_p (const_rtx x)
4762 return GET_CODE (x) == PLUS
4763 || GET_CODE (x) == MINUS
4764 || (GET_CODE (x) == CONST
4765 && GET_CODE (XEXP (x, 0)) == PLUS
4766 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4767 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4770 /* Like simplify_binary_operation except used for relational operators.
4771 MODE is the mode of the result. If MODE is VOIDmode, the operands must
4772 not both be VOIDmode.
4774 CMP_MODE specifies the mode in which the comparison is done, so it is
4775 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4776 the operands or, if both are VOIDmode, the operands are compared in
4777 "infinite precision". */
4779 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4780 machine_mode cmp_mode, rtx op0, rtx op1)
4782 rtx tem, trueop0, trueop1;
4784 if (cmp_mode == VOIDmode)
4785 cmp_mode = GET_MODE (op0);
4786 if (cmp_mode == VOIDmode)
4787 cmp_mode = GET_MODE (op1);
4789 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4790 if (tem)
4792 if (SCALAR_FLOAT_MODE_P (mode))
4794 if (tem == const0_rtx)
4795 return CONST0_RTX (mode);
4796 #ifdef FLOAT_STORE_FLAG_VALUE
4798 REAL_VALUE_TYPE val;
4799 val = FLOAT_STORE_FLAG_VALUE (mode);
4800 return const_double_from_real_value (val, mode);
4802 #else
4803 return NULL_RTX;
4804 #endif
4806 if (VECTOR_MODE_P (mode))
4808 if (tem == const0_rtx)
4809 return CONST0_RTX (mode);
4810 #ifdef VECTOR_STORE_FLAG_VALUE
4812 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4813 if (val == NULL_RTX)
4814 return NULL_RTX;
4815 if (val == const1_rtx)
4816 return CONST1_RTX (mode);
4818 return gen_const_vec_duplicate (mode, val);
4820 #else
4821 return NULL_RTX;
4822 #endif
4825 return tem;
4828 /* For the following tests, ensure const0_rtx is op1. */
4829 if (swap_commutative_operands_p (op0, op1)
4830 || (op0 == const0_rtx && op1 != const0_rtx))
4831 std::swap (op0, op1), code = swap_condition (code);
4833 /* If op0 is a compare, extract the comparison arguments from it. */
4834 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4835 return simplify_gen_relational (code, mode, VOIDmode,
4836 XEXP (op0, 0), XEXP (op0, 1));
4838 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4839 || CC0_P (op0))
4840 return NULL_RTX;
4842 trueop0 = avoid_constant_pool_reference (op0);
4843 trueop1 = avoid_constant_pool_reference (op1);
4844 return simplify_relational_operation_1 (code, mode, cmp_mode,
4845 trueop0, trueop1);
4848 /* This part of simplify_relational_operation is only used when CMP_MODE
4849 is not in class MODE_CC (i.e. it is a real comparison).
4851 MODE is the mode of the result, while CMP_MODE specifies the mode
4852 in which the comparison is done, so it is the mode of the operands. */
4854 static rtx
4855 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4856 machine_mode cmp_mode, rtx op0, rtx op1)
4858 enum rtx_code op0code = GET_CODE (op0);
4860 if (op1 == const0_rtx && COMPARISON_P (op0))
4862 /* If op0 is a comparison, extract the comparison arguments
4863 from it. */
4864 if (code == NE)
4866 if (GET_MODE (op0) == mode)
4867 return simplify_rtx (op0);
4868 else
4869 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4870 XEXP (op0, 0), XEXP (op0, 1));
4872 else if (code == EQ)
4874 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4875 if (new_code != UNKNOWN)
4876 return simplify_gen_relational (new_code, mode, VOIDmode,
4877 XEXP (op0, 0), XEXP (op0, 1));
4881 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4882 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4883 if ((code == LTU || code == GEU)
4884 && GET_CODE (op0) == PLUS
4885 && CONST_INT_P (XEXP (op0, 1))
4886 && (rtx_equal_p (op1, XEXP (op0, 0))
4887 || rtx_equal_p (op1, XEXP (op0, 1)))
4888 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4889 && XEXP (op0, 1) != const0_rtx)
4891 rtx new_cmp
4892 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4893 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4894 cmp_mode, XEXP (op0, 0), new_cmp);
4897 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4898 transformed into (LTU a -C). */
4899 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4900 && CONST_INT_P (XEXP (op0, 1))
4901 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4902 && XEXP (op0, 1) != const0_rtx)
4904 rtx new_cmp
4905 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4906 return simplify_gen_relational (LTU, mode, cmp_mode,
4907 XEXP (op0, 0), new_cmp);
4910 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4911 if ((code == LTU || code == GEU)
4912 && GET_CODE (op0) == PLUS
4913 && rtx_equal_p (op1, XEXP (op0, 1))
4914 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4915 && !rtx_equal_p (op1, XEXP (op0, 0)))
4916 return simplify_gen_relational (code, mode, cmp_mode, op0,
4917 copy_rtx (XEXP (op0, 0)));
4919 if (op1 == const0_rtx)
4921 /* Canonicalize (GTU x 0) as (NE x 0). */
4922 if (code == GTU)
4923 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4924 /* Canonicalize (LEU x 0) as (EQ x 0). */
4925 if (code == LEU)
4926 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4928 else if (op1 == const1_rtx)
4930 switch (code)
4932 case GE:
4933 /* Canonicalize (GE x 1) as (GT x 0). */
4934 return simplify_gen_relational (GT, mode, cmp_mode,
4935 op0, const0_rtx);
4936 case GEU:
4937 /* Canonicalize (GEU x 1) as (NE x 0). */
4938 return simplify_gen_relational (NE, mode, cmp_mode,
4939 op0, const0_rtx);
4940 case LT:
4941 /* Canonicalize (LT x 1) as (LE x 0). */
4942 return simplify_gen_relational (LE, mode, cmp_mode,
4943 op0, const0_rtx);
4944 case LTU:
4945 /* Canonicalize (LTU x 1) as (EQ x 0). */
4946 return simplify_gen_relational (EQ, mode, cmp_mode,
4947 op0, const0_rtx);
4948 default:
4949 break;
4952 else if (op1 == constm1_rtx)
4954 /* Canonicalize (LE x -1) as (LT x 0). */
4955 if (code == LE)
4956 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4957 /* Canonicalize (GT x -1) as (GE x 0). */
4958 if (code == GT)
4959 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4962 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
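/* E.g. (eq (plus x (const_int 4)) (const_int 10)) becomes
   (eq x (const_int 6)). */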
4963 if ((code == EQ || code == NE)
4964 && (op0code == PLUS || op0code == MINUS)
4965 && CONSTANT_P (op1)
4966 && CONSTANT_P (XEXP (op0, 1))
4967 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4969 rtx x = XEXP (op0, 0);
4970 rtx c = XEXP (op0, 1);
4971 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4972 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4974 /* Detect an infinite recursive condition, where we oscillate at this
4975 simplification case between:
4976 A + B == C <---> C - B == A,
4977 where A, B, and C are all constants with non-simplifiable expressions,
4978 usually SYMBOL_REFs. */
4979 if (GET_CODE (tem) == invcode
4980 && CONSTANT_P (x)
4981 && rtx_equal_p (c, XEXP (tem, 1)))
4982 return NULL_RTX;
4984 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
4987 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4988 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4989 scalar_int_mode int_mode, int_cmp_mode;
4990 if (code == NE
4991 && op1 == const0_rtx
4992 && is_int_mode (mode, &int_mode)
4993 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
4994 /* ??? Work-around BImode bugs in the ia64 backend. */
4995 && int_mode != BImode
4996 && int_cmp_mode != BImode
4997 && nonzero_bits (op0, int_cmp_mode) == 1
4998 && STORE_FLAG_VALUE == 1)
4999 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5000 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5001 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5003 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5004 if ((code == EQ || code == NE)
5005 && op1 == const0_rtx
5006 && op0code == XOR)
5007 return simplify_gen_relational (code, mode, cmp_mode,
5008 XEXP (op0, 0), XEXP (op0, 1));
5010 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5011 if ((code == EQ || code == NE)
5012 && op0code == XOR
5013 && rtx_equal_p (XEXP (op0, 0), op1)
5014 && !side_effects_p (XEXP (op0, 0)))
5015 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5016 CONST0_RTX (mode));
5018 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5019 if ((code == EQ || code == NE)
5020 && op0code == XOR
5021 && rtx_equal_p (XEXP (op0, 1), op1)
5022 && !side_effects_p (XEXP (op0, 1)))
5023 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5024 CONST0_RTX (mode));
5026 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5027 if ((code == EQ || code == NE)
5028 && op0code == XOR
5029 && CONST_SCALAR_INT_P (op1)
5030 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5031 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5032 simplify_gen_binary (XOR, cmp_mode,
5033 XEXP (op0, 1), op1));
5035 /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
5036 can be implemented with a BICS instruction on some targets, or
5037 constant-folded if y is a constant. */
5038 if ((code == EQ || code == NE)
5039 && op0code == AND
5040 && rtx_equal_p (XEXP (op0, 0), op1)
5041 && !side_effects_p (op1)
5042 && op1 != CONST0_RTX (cmp_mode))
5044 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
5045 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5047 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5048 CONST0_RTX (cmp_mode));
5051 /* Likewise for (eq/ne (and x y) y). */
5052 if ((code == EQ || code == NE)
5053 && op0code == AND
5054 && rtx_equal_p (XEXP (op0, 1), op1)
5055 && !side_effects_p (op1)
5056 && op1 != CONST0_RTX (cmp_mode))
5058 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
5059 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5061 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5062 CONST0_RTX (cmp_mode));
5065 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5066 if ((code == EQ || code == NE)
5067 && GET_CODE (op0) == BSWAP
5068 && CONST_SCALAR_INT_P (op1))
5069 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5070 simplify_gen_unary (BSWAP, cmp_mode,
5071 op1, cmp_mode));
5073 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5074 if ((code == EQ || code == NE)
5075 && GET_CODE (op0) == BSWAP
5076 && GET_CODE (op1) == BSWAP)
5077 return simplify_gen_relational (code, mode, cmp_mode,
5078 XEXP (op0, 0), XEXP (op1, 0));
5080 if (op0code == POPCOUNT && op1 == const0_rtx)
5081 switch (code)
5083 case EQ:
5084 case LE:
5085 case LEU:
5086 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5087 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5088 XEXP (op0, 0), const0_rtx);
5090 case NE:
5091 case GT:
5092 case GTU:
5093 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5094 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5095 XEXP (op0, 0), const0_rtx);
5097 default:
5098 break;
5101 return NULL_RTX;
5104 enum
5106 CMP_EQ = 1,
5107 CMP_LT = 2,
5108 CMP_GT = 4,
5109 CMP_LTU = 8,
5110 CMP_GTU = 16
5114 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5115 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
5116 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5117 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5118 For floating-point comparisons, assume that the operands were ordered. */
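/* For example, comparing the SImode constants -1 and 1 gives
   KNOWN_RESULT == CMP_LT | CMP_GTU: -1 is smaller as a signed value but
   larger as an unsigned value, so (lt -1 1) folds to const_true_rtx while
   (ltu -1 1) folds to const0_rtx. */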
5120 static rtx
5121 comparison_result (enum rtx_code code, int known_results)
5123 switch (code)
5125 case EQ:
5126 case UNEQ:
5127 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5128 case NE:
5129 case LTGT:
5130 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5132 case LT:
5133 case UNLT:
5134 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5135 case GE:
5136 case UNGE:
5137 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5139 case GT:
5140 case UNGT:
5141 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5142 case LE:
5143 case UNLE:
5144 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5146 case LTU:
5147 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5148 case GEU:
5149 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5151 case GTU:
5152 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5153 case LEU:
5154 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5156 case ORDERED:
5157 return const_true_rtx;
5158 case UNORDERED:
5159 return const0_rtx;
5160 default:
5161 gcc_unreachable ();
5165 /* Check if the given comparison (done in the given MODE) is actually
5166 a tautology or a contradiction. If the mode is VOIDmode, the
5167 comparison is done in "infinite precision". If no simplification
5168 is possible, this function returns zero. Otherwise, it returns
5169 either const_true_rtx or const0_rtx. */
5172 simplify_const_relational_operation (enum rtx_code code,
5173 machine_mode mode,
5174 rtx op0, rtx op1)
5176 rtx tem;
5177 rtx trueop0;
5178 rtx trueop1;
5180 gcc_assert (mode != VOIDmode
5181 || (GET_MODE (op0) == VOIDmode
5182 && GET_MODE (op1) == VOIDmode));
5184 /* If op0 is a compare, extract the comparison arguments from it. */
5185 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5187 op1 = XEXP (op0, 1);
5188 op0 = XEXP (op0, 0);
5190 if (GET_MODE (op0) != VOIDmode)
5191 mode = GET_MODE (op0);
5192 else if (GET_MODE (op1) != VOIDmode)
5193 mode = GET_MODE (op1);
5194 else
5195 return 0;
5198 /* We can't simplify MODE_CC values since we don't know what the
5199 actual comparison is. */
5200 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5201 return 0;
5203 /* Make sure the constant is second. */
5204 if (swap_commutative_operands_p (op0, op1))
5206 std::swap (op0, op1);
5207 code = swap_condition (code);
5210 trueop0 = avoid_constant_pool_reference (op0);
5211 trueop1 = avoid_constant_pool_reference (op1);
5213 /* For integer comparisons of A and B maybe we can simplify A - B and can
5214 then simplify a comparison of that with zero. If A and B are both either
5215 a register or a CONST_INT, this can't help; testing for these cases will
5216 prevent infinite recursion here and speed things up.
5218 We can only do this for EQ and NE comparisons as otherwise we may
5219 lose or introduce overflow, which we cannot disregard as undefined because
5220 we do not know the signedness of the operation on either the left or
5221 the right hand side of the comparison. */
5223 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5224 && (code == EQ || code == NE)
5225 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5226 && (REG_P (op1) || CONST_INT_P (trueop1)))
5227 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5228 /* We cannot do this if tem is a nonzero address. */
5229 && ! nonzero_address_p (tem))
5230 return simplify_const_relational_operation (signed_condition (code),
5231 mode, tem, const0_rtx);
5233 if (! HONOR_NANS (mode) && code == ORDERED)
5234 return const_true_rtx;
5236 if (! HONOR_NANS (mode) && code == UNORDERED)
5237 return const0_rtx;
5239 /* For modes without NaNs, if the two operands are equal, we know the
5240 result except if they have side-effects. Even with NaNs we know
5241 the result of unordered comparisons and, if signaling NaNs are
5242 irrelevant, also the result of LT/GT/LTGT. */
5243 if ((! HONOR_NANS (trueop0)
5244 || code == UNEQ || code == UNLE || code == UNGE
5245 || ((code == LT || code == GT || code == LTGT)
5246 && ! HONOR_SNANS (trueop0)))
5247 && rtx_equal_p (trueop0, trueop1)
5248 && ! side_effects_p (trueop0))
5249 return comparison_result (code, CMP_EQ);
5251 /* If the operands are floating-point constants, see if we can fold
5252 the result. */
5253 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5254 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5255 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5257 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5258 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5260 /* Comparisons are unordered iff at least one of the values is NaN. */
5261 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5262 switch (code)
5264 case UNEQ:
5265 case UNLT:
5266 case UNGT:
5267 case UNLE:
5268 case UNGE:
5269 case NE:
5270 case UNORDERED:
5271 return const_true_rtx;
5272 case EQ:
5273 case LT:
5274 case GT:
5275 case LE:
5276 case GE:
5277 case LTGT:
5278 case ORDERED:
5279 return const0_rtx;
5280 default:
5281 return 0;
5284 return comparison_result (code,
5285 (real_equal (d0, d1) ? CMP_EQ :
5286 real_less (d0, d1) ? CMP_LT : CMP_GT));
5289 /* Otherwise, see if the operands are both integers. */
5290 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5291 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5293 /* It would be nice if we really had a mode here. However, the
5294 largest int representable on the target is as good as
5295 infinite. */
5296 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5297 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5298 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5300 if (wi::eq_p (ptrueop0, ptrueop1))
5301 return comparison_result (code, CMP_EQ);
5302 else
5304 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5305 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5306 return comparison_result (code, cr);
5310 /* Optimize comparisons with upper and lower bounds. */
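/* For example, if nonzero_bits shows that only the low eight bits of
   TRUEOP0 can be set, then mmin == 0 and mmax == 255, so
   (gtu trueop0 (const_int 255)) folds to const0_rtx and
   (leu trueop0 (const_int 255)) folds to const_true_rtx. */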
5311 scalar_int_mode int_mode;
5312 if (CONST_INT_P (trueop1)
5313 && is_a <scalar_int_mode> (mode, &int_mode)
5314 && HWI_COMPUTABLE_MODE_P (int_mode)
5315 && !side_effects_p (trueop0))
5317 int sign;
5318 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5319 HOST_WIDE_INT val = INTVAL (trueop1);
5320 HOST_WIDE_INT mmin, mmax;
5322 if (code == GEU
5323 || code == LEU
5324 || code == GTU
5325 || code == LTU)
5326 sign = 0;
5327 else
5328 sign = 1;
5330 /* Get a reduced range if the sign bit is zero. */
5331 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5333 mmin = 0;
5334 mmax = nonzero;
5336 else
5338 rtx mmin_rtx, mmax_rtx;
5339 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5341 mmin = INTVAL (mmin_rtx);
5342 mmax = INTVAL (mmax_rtx);
5343 if (sign)
5345 unsigned int sign_copies
5346 = num_sign_bit_copies (trueop0, int_mode);
5348 mmin >>= (sign_copies - 1);
5349 mmax >>= (sign_copies - 1);
5353 switch (code)
5355 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5356 case GEU:
5357 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5358 return const_true_rtx;
5359 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5360 return const0_rtx;
5361 break;
5362 case GE:
5363 if (val <= mmin)
5364 return const_true_rtx;
5365 if (val > mmax)
5366 return const0_rtx;
5367 break;
5369 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5370 case LEU:
5371 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5372 return const_true_rtx;
5373 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5374 return const0_rtx;
5375 break;
5376 case LE:
5377 if (val >= mmax)
5378 return const_true_rtx;
5379 if (val < mmin)
5380 return const0_rtx;
5381 break;
5383 case EQ:
5384 /* x == y is always false for y out of range. */
5385 if (val < mmin || val > mmax)
5386 return const0_rtx;
5387 break;
5389 /* x > y is always false for y >= mmax, always true for y < mmin. */
5390 case GTU:
5391 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5392 return const0_rtx;
5393 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5394 return const_true_rtx;
5395 break;
5396 case GT:
5397 if (val >= mmax)
5398 return const0_rtx;
5399 if (val < mmin)
5400 return const_true_rtx;
5401 break;
5403 /* x < y is always false for y <= mmin, always true for y > mmax. */
5404 case LTU:
5405 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5406 return const0_rtx;
5407 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5408 return const_true_rtx;
5409 break;
5410 case LT:
5411 if (val <= mmin)
5412 return const0_rtx;
5413 if (val > mmax)
5414 return const_true_rtx;
5415 break;
5417 case NE:
5418 /* x != y is always true for y out of range. */
5419 if (val < mmin || val > mmax)
5420 return const_true_rtx;
5421 break;
5423 default:
5424 break;
5428 /* Optimize integer comparisons with zero. */
5429 if (is_a <scalar_int_mode> (mode, &int_mode)
5430 && trueop1 == const0_rtx
5431 && !side_effects_p (trueop0))
5433 /* Some addresses are known to be nonzero. We don't know
5434 their sign, but equality comparisons are known. */
5435 if (nonzero_address_p (trueop0))
5437 if (code == EQ || code == LEU)
5438 return const0_rtx;
5439 if (code == NE || code == GTU)
5440 return const_true_rtx;
5443 /* See if the first operand is an IOR with a constant. If so, we
5444 may be able to determine the result of this comparison. */
5445 if (GET_CODE (op0) == IOR)
5447 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5448 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5450 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5451 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5452 && (UINTVAL (inner_const)
5453 & (HOST_WIDE_INT_1U
5454 << sign_bitnum)));
5456 switch (code)
5458 case EQ:
5459 case LEU:
5460 return const0_rtx;
5461 case NE:
5462 case GTU:
5463 return const_true_rtx;
5464 case LT:
5465 case LE:
5466 if (has_sign)
5467 return const_true_rtx;
5468 break;
5469 case GT:
5470 case GE:
5471 if (has_sign)
5472 return const0_rtx;
5473 break;
5474 default:
5475 break;
5481 /* Optimize comparison of ABS with zero. */
5482 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5483 && (GET_CODE (trueop0) == ABS
5484 || (GET_CODE (trueop0) == FLOAT_EXTEND
5485 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5487 switch (code)
5489 case LT:
5490 /* Optimize abs(x) < 0.0. */
5491 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5492 return const0_rtx;
5493 break;
5495 case GE:
5496 /* Optimize abs(x) >= 0.0. */
5497 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5498 return const_true_rtx;
5499 break;
5501 case UNGE:
5502 /* Optimize ! (abs(x) < 0.0). */
5503 return const_true_rtx;
5505 default:
5506 break;
5510 return 0;
5513 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5514 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5515 or CTZ_DEFINED_VALUE_AT_ZERO respectively. Return OP (X) if the expression
5516 can be simplified to that, or NULL_RTX if not.
5517 Assume X is compared against zero with CMP_CODE and the true
5518 arm is TRUE_VAL and the false arm is FALSE_VAL. */
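/* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode, (eq X 0) ? 32 : (clz X) can be replaced by (clz X) alone,
   because both arms agree when X is zero. */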
5520 static rtx
5521 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5523 if (cmp_code != EQ && cmp_code != NE)
5524 return NULL_RTX;
5526 /* Result on X == 0 and X != 0 respectively. */
5527 rtx on_zero, on_nonzero;
5528 if (cmp_code == EQ)
5530 on_zero = true_val;
5531 on_nonzero = false_val;
5533 else
5535 on_zero = false_val;
5536 on_nonzero = true_val;
5539 rtx_code op_code = GET_CODE (on_nonzero);
5540 if ((op_code != CLZ && op_code != CTZ)
5541 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5542 || !CONST_INT_P (on_zero))
5543 return NULL_RTX;
5545 HOST_WIDE_INT op_val;
5546 scalar_int_mode mode ATTRIBUTE_UNUSED
5547 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5548 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5549 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5550 && op_val == INTVAL (on_zero))
5551 return on_nonzero;
5553 return NULL_RTX;
5557 /* Simplify CODE, an operation with result mode MODE and three operands,
5558 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5559 a constant. Return 0 if no simplification is possible. */
5562 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5563 machine_mode op0_mode, rtx op0, rtx op1,
5564 rtx op2)
5566 bool any_change = false;
5567 rtx tem, trueop2;
5568 scalar_int_mode int_mode, int_op0_mode;
5570 switch (code)
5572 case FMA:
5573 /* Simplify negations around the multiplication. */
5574 /* -a * -b + c => a * b + c. */
5575 if (GET_CODE (op0) == NEG)
5577 tem = simplify_unary_operation (NEG, mode, op1, mode);
5578 if (tem)
5579 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5581 else if (GET_CODE (op1) == NEG)
5583 tem = simplify_unary_operation (NEG, mode, op0, mode);
5584 if (tem)
5585 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5588 /* Canonicalize the two multiplication operands. */
5589 /* a * -b + c => -b * a + c. */
5590 if (swap_commutative_operands_p (op0, op1))
5591 std::swap (op0, op1), any_change = true;
5593 if (any_change)
5594 return gen_rtx_FMA (mode, op0, op1, op2);
5595 return NULL_RTX;
5597 case SIGN_EXTRACT:
5598 case ZERO_EXTRACT:
5599 if (CONST_INT_P (op0)
5600 && CONST_INT_P (op1)
5601 && CONST_INT_P (op2)
5602 && is_a <scalar_int_mode> (mode, &int_mode)
5603 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5604 && HWI_COMPUTABLE_MODE_P (int_mode))
5606 /* Extracting a bit-field from a constant. */
5607 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5608 HOST_WIDE_INT op1val = INTVAL (op1);
5609 HOST_WIDE_INT op2val = INTVAL (op2);
5610 if (!BITS_BIG_ENDIAN)
5611 val >>= op2val;
5612 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5613 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5614 else
5615 /* Not enough information to calculate the bit position. */
5616 break;
5618 if (HOST_BITS_PER_WIDE_INT != op1val)
5620 /* First zero-extend. */
5621 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5622 /* If desired, propagate sign bit. */
5623 if (code == SIGN_EXTRACT
5624 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5625 != 0)
5626 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5629 return gen_int_mode (val, int_mode);
5631 break;
5633 case IF_THEN_ELSE:
5634 if (CONST_INT_P (op0))
5635 return op0 != const0_rtx ? op1 : op2;
5637 /* Convert c ? a : a into "a". */
5638 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5639 return op1;
5641 /* Convert a != b ? a : b into "a". */
5642 if (GET_CODE (op0) == NE
5643 && ! side_effects_p (op0)
5644 && ! HONOR_NANS (mode)
5645 && ! HONOR_SIGNED_ZEROS (mode)
5646 && ((rtx_equal_p (XEXP (op0, 0), op1)
5647 && rtx_equal_p (XEXP (op0, 1), op2))
5648 || (rtx_equal_p (XEXP (op0, 0), op2)
5649 && rtx_equal_p (XEXP (op0, 1), op1))))
5650 return op1;
5652 /* Convert a == b ? a : b into "b". */
5653 if (GET_CODE (op0) == EQ
5654 && ! side_effects_p (op0)
5655 && ! HONOR_NANS (mode)
5656 && ! HONOR_SIGNED_ZEROS (mode)
5657 && ((rtx_equal_p (XEXP (op0, 0), op1)
5658 && rtx_equal_p (XEXP (op0, 1), op2))
5659 || (rtx_equal_p (XEXP (op0, 0), op2)
5660 && rtx_equal_p (XEXP (op0, 1), op1))))
5661 return op2;
5663 /* Convert (!c) != {0,...,0} ? a : b into
5664 c != {0,...,0} ? b : a for vector modes. */
5665 if (VECTOR_MODE_P (GET_MODE (op1))
5666 && GET_CODE (op0) == NE
5667 && GET_CODE (XEXP (op0, 0)) == NOT
5668 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5670 rtx cv = XEXP (op0, 1);
5671 int nunits = CONST_VECTOR_NUNITS (cv);
5672 bool ok = true;
5673 for (int i = 0; i < nunits; ++i)
5674 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5676 ok = false;
5677 break;
5679 if (ok)
5681 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5682 XEXP (XEXP (op0, 0), 0),
5683 XEXP (op0, 1));
5684 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5685 return retval;
5689 /* Convert x == 0 ? N : clz (x) into clz (x) when
5690 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5691 Similarly for ctz (x). */
5692 if (COMPARISON_P (op0) && !side_effects_p (op0)
5693 && XEXP (op0, 1) == const0_rtx)
5695 rtx simplified
5696 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5697 op1, op2);
5698 if (simplified)
5699 return simplified;
5702 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5704 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5705 ? GET_MODE (XEXP (op0, 1))
5706 : GET_MODE (XEXP (op0, 0)));
5707 rtx temp;
5709 /* Look for happy constants in op1 and op2. */
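/* For example, on a target where STORE_FLAG_VALUE is 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) can simply become
   (lt x y), and with the two arms swapped it becomes the reversed
   comparison, e.g. (ge x y) for integer operands.  */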
5710 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5712 HOST_WIDE_INT t = INTVAL (op1);
5713 HOST_WIDE_INT f = INTVAL (op2);
5715 if (t == STORE_FLAG_VALUE && f == 0)
5716 code = GET_CODE (op0);
5717 else if (t == 0 && f == STORE_FLAG_VALUE)
5719 enum rtx_code tmp;
5720 tmp = reversed_comparison_code (op0, NULL);
5721 if (tmp == UNKNOWN)
5722 break;
5723 code = tmp;
5725 else
5726 break;
5728 return simplify_gen_relational (code, mode, cmp_mode,
5729 XEXP (op0, 0), XEXP (op0, 1));
5732 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5733 cmp_mode, XEXP (op0, 0),
5734 XEXP (op0, 1));
5736 /* See if any simplifications were possible. */
5737 if (temp)
5739 if (CONST_INT_P (temp))
5740 return temp == const0_rtx ? op2 : op1;
5741 else
5742 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5745 break;
5747 case VEC_MERGE:
5748 gcc_assert (GET_MODE (op0) == mode);
5749 gcc_assert (GET_MODE (op1) == mode);
5750 gcc_assert (VECTOR_MODE_P (mode));
5751 trueop2 = avoid_constant_pool_reference (op2);
5752 if (CONST_INT_P (trueop2))
5754 unsigned n_elts = GET_MODE_NUNITS (mode);
5755 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5756 unsigned HOST_WIDE_INT mask;
5757 if (n_elts == HOST_BITS_PER_WIDE_INT)
5758 mask = -1;
5759 else
5760 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
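/* Bit I of SEL selects element I from op0 when set and from op1 when
   clear, so a selector of 0 takes the whole result from op1 and a
   selector equal to MASK takes it entirely from op0.  */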
5762 if (!(sel & mask) && !side_effects_p (op0))
5763 return op1;
5764 if ((sel & mask) == mask && !side_effects_p (op1))
5765 return op0;
5767 rtx trueop0 = avoid_constant_pool_reference (op0);
5768 rtx trueop1 = avoid_constant_pool_reference (op1);
5769 if (GET_CODE (trueop0) == CONST_VECTOR
5770 && GET_CODE (trueop1) == CONST_VECTOR)
5772 rtvec v = rtvec_alloc (n_elts);
5773 unsigned int i;
5775 for (i = 0; i < n_elts; i++)
5776 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5777 ? CONST_VECTOR_ELT (trueop0, i)
5778 : CONST_VECTOR_ELT (trueop1, i));
5779 return gen_rtx_CONST_VECTOR (mode, v);
5782 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5783 if no element from a appears in the result. */
5784 if (GET_CODE (op0) == VEC_MERGE)
5786 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5787 if (CONST_INT_P (tem))
5789 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5790 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5791 return simplify_gen_ternary (code, mode, mode,
5792 XEXP (op0, 1), op1, op2);
5793 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5794 return simplify_gen_ternary (code, mode, mode,
5795 XEXP (op0, 0), op1, op2);
5798 if (GET_CODE (op1) == VEC_MERGE)
5800 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5801 if (CONST_INT_P (tem))
5803 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5804 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5805 return simplify_gen_ternary (code, mode, mode,
5806 op0, XEXP (op1, 1), op2);
5807 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5808 return simplify_gen_ternary (code, mode, mode,
5809 op0, XEXP (op1, 0), op2);
5813 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5814 with a. */
5815 if (GET_CODE (op0) == VEC_DUPLICATE
5816 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5817 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5818 && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
5820 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5821 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5823 if (XEXP (XEXP (op0, 0), 0) == op1
5824 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5825 return op1;
5828 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5829 (const_int N))
5830 with (vec_concat (X) (B)) if N == 1 or
5831 (vec_concat (A) (X)) if N == 2. */
5832 if (GET_CODE (op0) == VEC_DUPLICATE
5833 && GET_CODE (op1) == CONST_VECTOR
5834 && CONST_VECTOR_NUNITS (op1) == 2
5835 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5836 && IN_RANGE (sel, 1, 2))
5838 rtx newop0 = XEXP (op0, 0);
5839 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5840 if (sel == 2)
5841 std::swap (newop0, newop1);
5842 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5844 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5845 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5846 Only applies for vectors of two elements. */
5847 if (GET_CODE (op0) == VEC_DUPLICATE
5848 && GET_CODE (op1) == VEC_CONCAT
5849 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5850 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5851 && IN_RANGE (sel, 1, 2))
5853 rtx newop0 = XEXP (op0, 0);
5854 rtx newop1 = XEXP (op1, 2 - sel);
5855 rtx otherop = XEXP (op1, sel - 1);
5856 if (sel == 2)
5857 std::swap (newop0, newop1);
5858 /* Don't want to throw away the other part of the vec_concat if
5859 it has side-effects. */
5860 if (!side_effects_p (otherop))
5861 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5864 /* Replace:
5866 (vec_merge:outer (vec_duplicate:outer x:inner)
5867 (subreg:outer y:inner 0)
5868 (const_int N))
5870 with (vec_concat:outer x:inner y:inner) if N == 1,
5871 or (vec_concat:outer y:inner x:inner) if N == 2.
5872 We assume that degenerate cases (N == 0 or N == 3), which
5873 represent taking all elements from either input, are handled
5874 elsewhere.
5876 Implicitly, this means we have a paradoxical subreg, but checking
5877 for that explicitly is cheap, so do it anyway.
5879 Only applies for vectors of two elements. */
5881 if ((GET_CODE (op0) == VEC_DUPLICATE
5882 || GET_CODE (op1) == VEC_DUPLICATE)
5883 && GET_MODE (op0) == GET_MODE (op1)
5884 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5885 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5886 && IN_RANGE (sel, 1, 2))
5888 rtx newop0 = op0, newop1 = op1;
5890 /* Canonicalize locally such that the VEC_DUPLICATE is always
5891 the first operand. */
5892 if (GET_CODE (newop1) == VEC_DUPLICATE)
5894 std::swap (newop0, newop1);
5895 /* If we swap the operand order, we also need to swap
5896 the selector mask. */
5897 sel = sel == 1 ? 2 : 1;
5900 if (GET_CODE (newop1) == SUBREG
5901 && paradoxical_subreg_p (newop1)
5902 && subreg_lowpart_p (newop1)
5903 && GET_MODE (SUBREG_REG (newop1))
5904 == GET_MODE (XEXP (newop0, 0)))
5906 newop0 = XEXP (newop0, 0);
5907 newop1 = SUBREG_REG (newop1);
5908 if (sel == 2)
5909 std::swap (newop0, newop1);
5910 return simplify_gen_binary (VEC_CONCAT, mode,
5911 newop0, newop1);
5915 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5916 (const_int n))
5917 with (vec_concat x y) or (vec_concat y x) depending on value
5918 of N. */
5919 if (GET_CODE (op0) == VEC_DUPLICATE
5920 && GET_CODE (op1) == VEC_DUPLICATE
5921 && GET_MODE_NUNITS (GET_MODE (op0)) == 2
5922 && GET_MODE_NUNITS (GET_MODE (op1)) == 2
5923 && IN_RANGE (sel, 1, 2))
5925 rtx newop0 = XEXP (op0, 0);
5926 rtx newop1 = XEXP (op1, 0);
5927 if (sel == 2)
5928 std::swap (newop0, newop1);
5930 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5934 if (rtx_equal_p (op0, op1)
5935 && !side_effects_p (op2) && !side_effects_p (op1))
5936 return op0;
5938 break;
5940 default:
5941 gcc_unreachable ();
5944 return 0;
5947 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5948 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5949 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5951 Works by unpacking OP into a collection of 8-bit values
5952 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5953 and then repacking them again for OUTERMODE. */
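/* For example, with OUTERMODE = QImode, OP = (const_int 0x1234),
   INNERMODE = HImode and BYTE = 0, the value unpacks into the bytes
   {0x34, 0x12}; a little-endian target then repacks byte 0 as
   (const_int 0x34), whereas a big-endian target sees 0x12 at byte 0.  */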
5955 static rtx
5956 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
5957 fixed_size_mode innermode, unsigned int byte)
5959 enum {
5960 value_bit = 8,
5961 value_mask = (1 << value_bit) - 1
5963 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
5964 int value_start;
5965 int i;
5966 int elem;
5968 int num_elem;
5969 rtx * elems;
5970 int elem_bitsize;
5971 rtx result_s = NULL;
5972 rtvec result_v = NULL;
5973 enum mode_class outer_class;
5974 scalar_mode outer_submode;
5975 int max_bitsize;
5977 /* Some ports misuse CCmode. */
5978 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
5979 return op;
5981 /* We have no way to represent a complex constant at the rtl level. */
5982 if (COMPLEX_MODE_P (outermode))
5983 return NULL_RTX;
5985 /* We support any size mode. */
5986 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
5987 GET_MODE_BITSIZE (innermode));
5989 /* Unpack the value. */
5991 if (GET_CODE (op) == CONST_VECTOR)
5993 num_elem = CONST_VECTOR_NUNITS (op);
5994 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
5996 else
5998 num_elem = 1;
5999 elem_bitsize = max_bitsize;
6001 /* If this asserts, it is too complicated; reducing value_bit may help. */
6002 gcc_assert (BITS_PER_UNIT % value_bit == 0);
6003 /* I don't know how to handle endianness of sub-units. */
6004 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6006 for (elem = 0; elem < num_elem; elem++)
6008 unsigned char * vp;
6009 rtx el = (GET_CODE (op) == CONST_VECTOR
6010 ? CONST_VECTOR_ELT (op, elem)
6011 : op);
6013 /* Vectors are kept in target memory order. (This is probably
6014 a mistake.) */
6016 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6017 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6018 / BITS_PER_UNIT);
6019 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6020 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6021 unsigned bytele = (subword_byte % UNITS_PER_WORD
6022 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6023 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6026 switch (GET_CODE (el))
6028 case CONST_INT:
6029 for (i = 0;
6030 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6031 i += value_bit)
6032 *vp++ = INTVAL (el) >> i;
6033 /* CONST_INTs are always logically sign-extended. */
6034 for (; i < elem_bitsize; i += value_bit)
6035 *vp++ = INTVAL (el) < 0 ? -1 : 0;
6036 break;
6038 case CONST_WIDE_INT:
6040 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6041 unsigned char extend = wi::sign_mask (val);
6042 int prec = wi::get_precision (val);
6044 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6045 *vp++ = wi::extract_uhwi (val, i, value_bit);
6046 for (; i < elem_bitsize; i += value_bit)
6047 *vp++ = extend;
6049 break;
6051 case CONST_DOUBLE:
6052 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6054 unsigned char extend = 0;
6055 /* If this triggers, someone should have generated a
6056 CONST_INT instead. */
6057 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6059 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6060 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6061 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6063 *vp++
6064 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6065 i += value_bit;
6068 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6069 extend = -1;
6070 for (; i < elem_bitsize; i += value_bit)
6071 *vp++ = extend;
6073 else
6075 /* This is big enough for anything on the platform. */
6076 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6077 scalar_float_mode el_mode;
6079 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6080 int bitsize = GET_MODE_BITSIZE (el_mode);
6082 gcc_assert (bitsize <= elem_bitsize);
6083 gcc_assert (bitsize % value_bit == 0);
6085 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6086 GET_MODE (el));
6088 /* real_to_target produces its result in words affected by
6089 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6090 and use WORDS_BIG_ENDIAN instead; see the documentation
6091 of SUBREG in rtl.texi. */
6092 for (i = 0; i < bitsize; i += value_bit)
6094 int ibase;
6095 if (WORDS_BIG_ENDIAN)
6096 ibase = bitsize - 1 - i;
6097 else
6098 ibase = i;
6099 *vp++ = tmp[ibase / 32] >> i % 32;
6102 /* It shouldn't matter what's done here, so fill it with
6103 zero. */
6104 for (; i < elem_bitsize; i += value_bit)
6105 *vp++ = 0;
6107 break;
6109 case CONST_FIXED:
6110 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6112 for (i = 0; i < elem_bitsize; i += value_bit)
6113 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6115 else
6117 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6118 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6119 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6120 i += value_bit)
6121 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6122 >> (i - HOST_BITS_PER_WIDE_INT);
6123 for (; i < elem_bitsize; i += value_bit)
6124 *vp++ = 0;
6126 break;
6128 default:
6129 gcc_unreachable ();
6133 /* Now, pick the right byte to start with. */
6134 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6135 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6136 will already have offset 0. */
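/* For instance, a 1-byte SUBREG at byte 3 of a 4-byte inner value keeps
   offset 3 on a little-endian target (the offset already counts from the
   least significant byte), while on a fully big-endian target with
   UNITS_PER_WORD >= 4 that byte is the least significant one and is
   renumbered to 0.  */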
6137 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
6139 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
6140 - byte);
6141 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6142 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6143 byte = (subword_byte % UNITS_PER_WORD
6144 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6147 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6148 so if it's become negative it will instead be very large.) */
6149 gcc_assert (byte < GET_MODE_SIZE (innermode));
6151 /* Convert from bytes to chunks of size value_bit. */
6152 value_start = byte * (BITS_PER_UNIT / value_bit);
6154 /* Re-pack the value. */
6155 num_elem = GET_MODE_NUNITS (outermode);
6157 if (VECTOR_MODE_P (outermode))
6159 result_v = rtvec_alloc (num_elem);
6160 elems = &RTVEC_ELT (result_v, 0);
6162 else
6163 elems = &result_s;
6165 outer_submode = GET_MODE_INNER (outermode);
6166 outer_class = GET_MODE_CLASS (outer_submode);
6167 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6169 gcc_assert (elem_bitsize % value_bit == 0);
6170 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6172 for (elem = 0; elem < num_elem; elem++)
6174 unsigned char *vp;
6176 /* Vectors are stored in target memory order. (This is probably
6177 a mistake.) */
6179 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6180 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6181 / BITS_PER_UNIT);
6182 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6183 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6184 unsigned bytele = (subword_byte % UNITS_PER_WORD
6185 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6186 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6189 switch (outer_class)
6191 case MODE_INT:
6192 case MODE_PARTIAL_INT:
6194 int u;
6195 int base = 0;
6196 int units
6197 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6198 / HOST_BITS_PER_WIDE_INT;
6199 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6200 wide_int r;
6202 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6203 return NULL_RTX;
6204 for (u = 0; u < units; u++)
6206 unsigned HOST_WIDE_INT buf = 0;
6207 for (i = 0;
6208 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6209 i += value_bit)
6210 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6212 tmp[u] = buf;
6213 base += HOST_BITS_PER_WIDE_INT;
6215 r = wide_int::from_array (tmp, units,
6216 GET_MODE_PRECISION (outer_submode));
6217 #if TARGET_SUPPORTS_WIDE_INT == 0
6218 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6219 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6220 return NULL_RTX;
6221 #endif
6222 elems[elem] = immed_wide_int_const (r, outer_submode);
6224 break;
6226 case MODE_FLOAT:
6227 case MODE_DECIMAL_FLOAT:
6229 REAL_VALUE_TYPE r;
6230 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6232 /* real_from_target wants its input in words affected by
6233 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6234 and use WORDS_BIG_ENDIAN instead; see the documentation
6235 of SUBREG in rtl.texi. */
6236 for (i = 0; i < elem_bitsize; i += value_bit)
6238 int ibase;
6239 if (WORDS_BIG_ENDIAN)
6240 ibase = elem_bitsize - 1 - i;
6241 else
6242 ibase = i;
6243 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6246 real_from_target (&r, tmp, outer_submode);
6247 elems[elem] = const_double_from_real_value (r, outer_submode);
6249 break;
6251 case MODE_FRACT:
6252 case MODE_UFRACT:
6253 case MODE_ACCUM:
6254 case MODE_UACCUM:
6256 FIXED_VALUE_TYPE f;
6257 f.data.low = 0;
6258 f.data.high = 0;
6259 f.mode = outer_submode;
6261 for (i = 0;
6262 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6263 i += value_bit)
6264 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6265 for (; i < elem_bitsize; i += value_bit)
6266 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6267 << (i - HOST_BITS_PER_WIDE_INT));
6269 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6271 break;
6273 default:
6274 gcc_unreachable ();
6277 if (VECTOR_MODE_P (outermode))
6278 return gen_rtx_CONST_VECTOR (outermode, result_v);
6279 else
6280 return result_s;
6283 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6284 Return 0 if no simplifications are possible. */
6286 simplify_subreg (machine_mode outermode, rtx op,
6287 machine_mode innermode, poly_uint64 byte)
6289 /* Little bit of sanity checking. */
6290 gcc_assert (innermode != VOIDmode);
6291 gcc_assert (outermode != VOIDmode);
6292 gcc_assert (innermode != BLKmode);
6293 gcc_assert (outermode != BLKmode);
6295 gcc_assert (GET_MODE (op) == innermode
6296 || GET_MODE (op) == VOIDmode);
6298 if (!multiple_p (byte, GET_MODE_SIZE (outermode)))
6299 return NULL_RTX;
6301 if (maybe_ge (byte, GET_MODE_SIZE (innermode)))
6302 return NULL_RTX;
6304 if (outermode == innermode && known_eq (byte, 0U))
6305 return op;
6307 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6309 rtx elt;
6311 if (VECTOR_MODE_P (outermode)
6312 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6313 && vec_duplicate_p (op, &elt))
6314 return gen_vec_duplicate (outermode, elt);
6316 if (outermode == GET_MODE_INNER (innermode)
6317 && vec_duplicate_p (op, &elt))
6318 return elt;
6321 if (CONST_SCALAR_INT_P (op)
6322 || CONST_DOUBLE_AS_FLOAT_P (op)
6323 || CONST_FIXED_P (op)
6324 || GET_CODE (op) == CONST_VECTOR)
6326 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6327 the result from bytes, so it only works if the sizes of the modes
6328 and the value of the offset are known at compile time. Cases that
6329 apply to general modes and offsets should be handled here
6330 before calling simplify_immed_subreg. */
6331 fixed_size_mode fs_outermode, fs_innermode;
6332 unsigned HOST_WIDE_INT cbyte;
6333 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6334 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6335 && byte.is_constant (&cbyte))
6336 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte);
6338 return NULL_RTX;
6341 /* Changing mode twice with SUBREG => just change it once,
6342 or not at all if changing back to op's starting mode. */
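/* For example, on a little-endian target
   (subreg:QI (subreg:HI (reg:SI x) 0) 0) folds to (subreg:QI (reg:SI x) 0),
   and (subreg:SI (subreg:DI (reg:SI x) 0) 0) folds back to (reg:SI x)
   itself.  */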
6343 if (GET_CODE (op) == SUBREG)
6345 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6346 rtx newx;
6348 if (outermode == innermostmode
6349 && known_eq (byte, 0U)
6350 && known_eq (SUBREG_BYTE (op), 0))
6351 return SUBREG_REG (op);
6353 /* Work out the memory offset of the final OUTERMODE value relative
6354 to the inner value of OP. */
6355 poly_int64 mem_offset = subreg_memory_offset (outermode,
6356 innermode, byte);
6357 poly_int64 op_mem_offset = subreg_memory_offset (op);
6358 poly_int64 final_offset = mem_offset + op_mem_offset;
6360 /* See whether resulting subreg will be paradoxical. */
6361 if (!paradoxical_subreg_p (outermode, innermostmode))
6363 /* In nonparadoxical subregs we can't handle negative offsets. */
6364 if (maybe_lt (final_offset, 0))
6365 return NULL_RTX;
6366 /* Bail out in case resulting subreg would be incorrect. */
6367 if (!multiple_p (final_offset, GET_MODE_SIZE (outermode))
6368 || maybe_ge (final_offset, GET_MODE_SIZE (innermostmode)))
6369 return NULL_RTX;
6371 else
6373 poly_int64 required_offset = subreg_memory_offset (outermode,
6374 innermostmode, 0);
6375 if (maybe_ne (final_offset, required_offset))
6376 return NULL_RTX;
6377 /* Paradoxical subregs always have byte offset 0. */
6378 final_offset = 0;
6381 /* Recurse for further possible simplifications. */
6382 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6383 final_offset);
6384 if (newx)
6385 return newx;
6386 if (validate_subreg (outermode, innermostmode,
6387 SUBREG_REG (op), final_offset))
6389 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6390 if (SUBREG_PROMOTED_VAR_P (op)
6391 && SUBREG_PROMOTED_SIGN (op) >= 0
6392 && GET_MODE_CLASS (outermode) == MODE_INT
6393 && IN_RANGE (GET_MODE_SIZE (outermode),
6394 GET_MODE_SIZE (innermode),
6395 GET_MODE_SIZE (innermostmode))
6396 && subreg_lowpart_p (newx))
6398 SUBREG_PROMOTED_VAR_P (newx) = 1;
6399 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6401 return newx;
6403 return NULL_RTX;
6406 /* SUBREG of a hard register => just change the register number
6407 and/or mode. If the hard register is not valid in that mode,
6408 suppress this simplification. If the hard register is the stack,
6409 frame, or argument pointer, leave this as a SUBREG. */
6411 if (REG_P (op) && HARD_REGISTER_P (op))
6413 unsigned int regno, final_regno;
6415 regno = REGNO (op);
6416 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6417 if (HARD_REGISTER_NUM_P (final_regno))
6419 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6420 subreg_memory_offset (outermode,
6421 innermode, byte));
6423 /* Propagate original regno. We don't have any way to specify
6424 the offset inside original regno, so do so only for lowpart.
6425 The information is used only by alias analysis, which cannot
6426 grok partial registers anyway. */
6428 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6429 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6430 return x;
6434 /* If we have a SUBREG of a register that we are replacing and we are
6435 replacing it with a MEM, make a new MEM and try replacing the
6436 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6437 or if we would be widening it. */
6439 if (MEM_P (op)
6440 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6441 /* Allow splitting of volatile memory references in case we don't
6442 have an instruction to move the whole thing. */
6443 && (! MEM_VOLATILE_P (op)
6444 || ! have_insn_for (SET, innermode))
6445 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
6446 return adjust_address_nv (op, outermode, byte);
6448 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6449 of two parts. */
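/* For example, for a complex value (concat:DC (reg:DF a) (reg:DF b)),
   byte offsets below GET_MODE_SIZE (DFmode) select the real part A and
   larger offsets select the imaginary part B, with the offset rebased
   relative to the selected part.  */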
6450 if (GET_CODE (op) == CONCAT
6451 || GET_CODE (op) == VEC_CONCAT)
6453 unsigned int part_size;
6454 poly_uint64 final_offset;
6455 rtx part, res;
6457 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6458 if (part_mode == VOIDmode)
6459 part_mode = GET_MODE_INNER (GET_MODE (op));
6460 part_size = GET_MODE_SIZE (part_mode);
6461 if (known_lt (byte, part_size))
6463 part = XEXP (op, 0);
6464 final_offset = byte;
6466 else if (known_ge (byte, part_size))
6468 part = XEXP (op, 1);
6469 final_offset = byte - part_size;
6471 else
6472 return NULL_RTX;
6474 if (maybe_gt (final_offset + GET_MODE_SIZE (outermode), part_size))
6475 return NULL_RTX;
6477 part_mode = GET_MODE (part);
6478 if (part_mode == VOIDmode)
6479 part_mode = GET_MODE_INNER (GET_MODE (op));
6480 res = simplify_subreg (outermode, part, part_mode, final_offset);
6481 if (res)
6482 return res;
6483 if (validate_subreg (outermode, part_mode, part, final_offset))
6484 return gen_rtx_SUBREG (outermode, part, final_offset);
6485 return NULL_RTX;
6488 /* A SUBREG resulting from a zero extension may fold to zero if
6489 it extracts higher bits than the ZERO_EXTEND's source provides. */
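/* For example, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI x)) 4) extracts bits 32-63 of the
   zero extension, which are known to be zero, so it folds to
   (const_int 0).  */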
6490 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6492 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6493 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6494 return CONST0_RTX (outermode);
6497 scalar_int_mode int_outermode, int_innermode;
6498 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6499 && is_a <scalar_int_mode> (innermode, &int_innermode)
6500 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6502 /* Handle polynomial integers. The upper bits of a paradoxical
6503 subreg are undefined, so this is safe regardless of whether
6504 we're truncating or extending. */
6505 if (CONST_POLY_INT_P (op))
6507 poly_wide_int val
6508 = poly_wide_int::from (const_poly_int_value (op),
6509 GET_MODE_PRECISION (int_outermode),
6510 SIGNED);
6511 return immed_wide_int_const (val, int_outermode);
6514 if (GET_MODE_PRECISION (int_outermode)
6515 < GET_MODE_PRECISION (int_innermode))
6517 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6518 if (tem)
6519 return tem;
6523 return NULL_RTX;
6526 /* Make a SUBREG operation or equivalent if it folds. */
6529 simplify_gen_subreg (machine_mode outermode, rtx op,
6530 machine_mode innermode, poly_uint64 byte)
6532 rtx newx;
6534 newx = simplify_subreg (outermode, op, innermode, byte);
6535 if (newx)
6536 return newx;
6538 if (GET_CODE (op) == SUBREG
6539 || GET_CODE (op) == CONCAT
6540 || GET_MODE (op) == VOIDmode)
6541 return NULL_RTX;
6543 if (validate_subreg (outermode, innermode, op, byte))
6544 return gen_rtx_SUBREG (outermode, op, byte);
6546 return NULL_RTX;
6549 /* Generate a subreg that extracts the least significant part of EXPR
6550 (which has mode INNER_MODE) as a value of mode OUTER_MODE. */
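/* For example, lowpart_subreg (QImode, x, SImode) typically yields
   (subreg:QI x 0) on a little-endian target and (subreg:QI x 3) on a
   big-endian one, or a further-simplified equivalent when X is something
   that simplify_subreg can fold.  */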
6553 lowpart_subreg (machine_mode outer_mode, rtx expr,
6554 machine_mode inner_mode)
6556 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6557 subreg_lowpart_offset (outer_mode, inner_mode));
6560 /* Simplify X, an rtx expression.
6562 Return the simplified expression or NULL if no simplifications
6563 were possible.
6565 This is the preferred entry point into the simplification routines;
6566 however, we still allow passes to call the more specific routines.
6568 Right now GCC has three (yes, three) major bodies of RTL simplification
6569 code that need to be unified.
6571 1. fold_rtx in cse.c. This code uses various CSE specific
6572 information to aid in RTL simplification.
6574 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6575 it uses combine specific information to aid in RTL
6576 simplification.
6578 3. The routines in this file.
6581 Long term we want to only have one body of simplification code; to
6582 get to that state I recommend the following steps:
6584 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6585 which are not pass dependent state into these routines.
6587 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6588 use this routine whenever possible.
6590 3. Allow for pass dependent state to be provided to these
6591 routines and add simplifications based on the pass dependent
6592 state. Remove code from cse.c & combine.c that becomes
6593 redundant/dead.
6595 It will take time, but ultimately the compiler will be easier to
6596 maintain and improve. It's totally silly that when we add a
6597 simplification it needs to be added to 4 places (3 for RTL
6598 simplification and 1 for tree simplification). */
6601 simplify_rtx (const_rtx x)
6603 const enum rtx_code code = GET_CODE (x);
6604 const machine_mode mode = GET_MODE (x);
6606 switch (GET_RTX_CLASS (code))
6608 case RTX_UNARY:
6609 return simplify_unary_operation (code, mode,
6610 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6611 case RTX_COMM_ARITH:
6612 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6613 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6615 /* Fall through. */
6617 case RTX_BIN_ARITH:
6618 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6620 case RTX_TERNARY:
6621 case RTX_BITFIELD_OPS:
6622 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6623 XEXP (x, 0), XEXP (x, 1),
6624 XEXP (x, 2));
6626 case RTX_COMPARE:
6627 case RTX_COMM_COMPARE:
6628 return simplify_relational_operation (code, mode,
6629 ((GET_MODE (XEXP (x, 0))
6630 != VOIDmode)
6631 ? GET_MODE (XEXP (x, 0))
6632 : GET_MODE (XEXP (x, 1))),
6633 XEXP (x, 0),
6634 XEXP (x, 1));
6636 case RTX_EXTRA:
6637 if (code == SUBREG)
6638 return simplify_subreg (mode, SUBREG_REG (x),
6639 GET_MODE (SUBREG_REG (x)),
6640 SUBREG_BYTE (x));
6641 break;
6643 case RTX_OBJ:
6644 if (code == LO_SUM)
6646 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6647 if (GET_CODE (XEXP (x, 0)) == HIGH
6648 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6649 return XEXP (x, 1);
6651 break;
6653 default:
6654 break;
6656 return NULL;
6659 #if CHECKING_P
6661 namespace selftest {
6663 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6665 static rtx
6666 make_test_reg (machine_mode mode)
6668 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6670 return gen_rtx_REG (mode, test_reg_num++);
6673 /* Test vector simplifications involving VEC_DUPLICATE in which the
6674 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6675 register that holds one element of MODE. */
6677 static void
6678 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6680 scalar_mode inner_mode = GET_MODE_INNER (mode);
6681 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6682 unsigned int nunits = GET_MODE_NUNITS (mode);
6683 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6685 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6686 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6687 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6688 ASSERT_RTX_EQ (duplicate,
6689 simplify_unary_operation (NOT, mode,
6690 duplicate_not, mode));
6692 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6693 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6694 ASSERT_RTX_EQ (duplicate,
6695 simplify_unary_operation (NEG, mode,
6696 duplicate_neg, mode));
6698 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6699 ASSERT_RTX_EQ (duplicate,
6700 simplify_binary_operation (PLUS, mode, duplicate,
6701 CONST0_RTX (mode)));
6703 ASSERT_RTX_EQ (duplicate,
6704 simplify_binary_operation (MINUS, mode, duplicate,
6705 CONST0_RTX (mode)));
6707 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6708 simplify_binary_operation (MINUS, mode, duplicate,
6709 duplicate));
6712 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6713 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6714 ASSERT_RTX_PTR_EQ (scalar_reg,
6715 simplify_binary_operation (VEC_SELECT, inner_mode,
6716 duplicate, zero_par));
6718 /* And again with the final element. */
6719 rtx last_index = gen_int_mode (GET_MODE_NUNITS (mode) - 1, word_mode);
6720 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6721 ASSERT_RTX_PTR_EQ (scalar_reg,
6722 simplify_binary_operation (VEC_SELECT, inner_mode,
6723 duplicate, last_par));
6725 /* Test a scalar subreg of a VEC_DUPLICATE. */
6726 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6727 ASSERT_RTX_EQ (scalar_reg,
6728 simplify_gen_subreg (inner_mode, duplicate,
6729 mode, offset));
6731 machine_mode narrower_mode;
6732 if (nunits > 2
6733 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6734 && VECTOR_MODE_P (narrower_mode))
6736 /* Test VEC_SELECT of a vector. */
6737 rtx vec_par
6738 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6739 rtx narrower_duplicate
6740 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6741 ASSERT_RTX_EQ (narrower_duplicate,
6742 simplify_binary_operation (VEC_SELECT, narrower_mode,
6743 duplicate, vec_par));
6745 /* Test a vector subreg of a VEC_DUPLICATE. */
6746 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6747 ASSERT_RTX_EQ (narrower_duplicate,
6748 simplify_gen_subreg (narrower_mode, duplicate,
6749 mode, offset));
6753 /* Test vector simplifications involving VEC_SERIES in which the
6754 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6755 register that holds one element of MODE. */
6757 static void
6758 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6760 /* Test unary cases with VEC_SERIES arguments. */
6761 scalar_mode inner_mode = GET_MODE_INNER (mode);
6762 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6763 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6764 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6765 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6766 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6767 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6768 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6769 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6770 neg_scalar_reg);
6771 ASSERT_RTX_EQ (series_0_r,
6772 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6773 ASSERT_RTX_EQ (series_r_m1,
6774 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6775 ASSERT_RTX_EQ (series_r_r,
6776 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6778 /* Test that a VEC_SERIES with a zero step is simplified to a VEC_DUPLICATE. */
6779 ASSERT_RTX_EQ (duplicate,
6780 simplify_binary_operation (VEC_SERIES, mode,
6781 scalar_reg, const0_rtx));
6783 /* Test PLUS and MINUS with VEC_SERIES. */
6784 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6785 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6786 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6787 ASSERT_RTX_EQ (series_r_r,
6788 simplify_binary_operation (PLUS, mode, series_0_r,
6789 duplicate));
6790 ASSERT_RTX_EQ (series_r_1,
6791 simplify_binary_operation (PLUS, mode, duplicate,
6792 series_0_1));
6793 ASSERT_RTX_EQ (series_r_m1,
6794 simplify_binary_operation (PLUS, mode, duplicate,
6795 series_0_m1));
6796 ASSERT_RTX_EQ (series_0_r,
6797 simplify_binary_operation (MINUS, mode, series_r_r,
6798 duplicate));
6799 ASSERT_RTX_EQ (series_r_m1,
6800 simplify_binary_operation (MINUS, mode, duplicate,
6801 series_0_1));
6802 ASSERT_RTX_EQ (series_r_1,
6803 simplify_binary_operation (MINUS, mode, duplicate,
6804 series_0_m1));
6805 ASSERT_RTX_EQ (series_0_m1,
6806 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6807 constm1_rtx));
6810 /* Verify some simplifications involving vectors. */
6812 static void
6813 test_vector_ops ()
6815 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6817 machine_mode mode = (machine_mode) i;
6818 if (VECTOR_MODE_P (mode))
6820 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6821 test_vector_ops_duplicate (mode, scalar_reg);
6822 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6823 && GET_MODE_NUNITS (mode) > 2)
6824 test_vector_ops_series (mode, scalar_reg);
6829 template<unsigned int N>
6830 struct simplify_const_poly_int_tests
6832 static void run ();
6835 template<>
6836 struct simplify_const_poly_int_tests<1>
6838 static void run () {}
6841 /* Test various CONST_POLY_INT properties. */
6843 template<unsigned int N>
6844 void
6845 simplify_const_poly_int_tests<N>::run ()
6847 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
6848 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
6849 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
6850 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
6851 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
6852 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
6853 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
6854 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
6855 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
6856 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
6857 rtx two = GEN_INT (2);
6858 rtx six = GEN_INT (6);
6859 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
6861 /* These tests only try limited operation combinations. Fuller arithmetic
6862 testing is done directly on poly_ints. */
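/* Each poly_int64 (A, B) above stands for the value A + B * X, where X
   is the runtime component of the polynomial. For instance, negating
   x8 = 30 + 24 * X gives -30 - 24 * X (x9), and adding x1 = 1 + X to
   x2 = -80 + 127 * X gives -79 + 128 * X, which wraps to -79 - 128 * X
   (x3) in QImode.  */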
6863 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
6864 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
6865 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
6866 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
6867 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
6868 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
6869 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
6870 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
6871 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
6872 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
6873 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
6876 /* Run all of the selftests within this file. */
6878 void
6879 simplify_rtx_c_tests ()
6881 test_vector_ops ();
6882 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
6885 } // namespace selftest
6887 #endif /* CHECKING_P */