gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
44 signed wide int. */
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
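/* Illustrative note: with the definition above, HWI_SIGN_EXTEND yields
   HOST_WIDE_INT_M1 (all ones) whenever LOW has its top bit set, i.e. is
   negative when viewed as a signed HOST_WIDE_INT, and HOST_WIDE_INT_0
   otherwise, exactly as if LOW were sign-extended into a second word.  */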
48 static rtx neg_const_int (machine_mode, const_rtx);
49 static bool plus_minus_operand_p (const_rtx);
50 static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51 static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53 static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55 static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56 static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
59 /* Negate a CONST_INT rtx. */
60 static rtx
61 neg_const_int (machine_mode mode, const_rtx i)
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
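/* Note on the special case above: if VAL == UINTVAL (I), then I is either
   zero or the most negative HOST_WIDE_INT.  In a mode wider than
   HOST_WIDE_INT the negation of the latter cannot be represented by
   sign-extending a single HOST_WIDE_INT, so the wide-int path in
   simplify_const_unary_operation is used instead of gen_int_mode.  */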
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (machine_mode mode, const_rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
82 if (!is_int_mode (mode, &int_mode))
83 return false;
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92 #if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
107 #else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
115 #endif
116 else
117 /* X is not an integer constant. */
118 return false;
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
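/* For example, for SImode this returns true only for an integer constant
   whose low 32 bits are 0x80000000, i.e. a value with just the sign bit
   of the mode set.  */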
125 /* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
129 bool
130 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
132 unsigned int width;
133 scalar_int_mode int_mode;
135 if (!is_int_mode (mode, &int_mode))
136 return false;
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
146 /* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148 bool
149 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
151 unsigned int width;
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
165 /* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167 bool
168 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
170 unsigned int width;
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
184 /* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
188 simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
191 rtx tem;
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
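/* Illustrative use: simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X, while simplify_gen_binary (PLUS, SImode, const1_rtx, x)
   returns (plus:SI x (const_int 1)) with the constant canonicalized to
   the second operand.  */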
206 /* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
209 avoid_constant_pool_reference (rtx x)
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
215 switch (GET_CODE (x))
217 case MEM:
218 break;
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
229 default:
230 return x;
233 if (GET_MODE (x) == BLKmode)
234 return x;
236 addr = XEXP (x, 0);
238 /* Call target hook to avoid the effects of -fpic etc....  */
239 addr = targetm.delegitimize_address (addr);
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
274 return x;
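/* For example, a (mem:DF (symbol_ref ...)) whose SYMBOL_REF is a constant
   pool address holding a DFmode constant is replaced by that CONST_DOUBLE;
   an access at a nonzero in-range offset is handled by taking a subreg of
   the pool constant, provided that simplifies to a constant.  */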
277 /* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
282 delegitimize_mem_from_attrs (rtx x)
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 poly_int64 offset = 0;
294 switch (TREE_CODE (decl))
296 default:
297 decl = NULL;
298 break;
300 case VAR_DECL:
301 break;
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
311 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
319 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
320 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
321 decl = NULL;
322 else
323 offset += bytepos + toffset_val;
324 break;
328 if (decl
329 && mode == GET_MODE (x)
330 && VAR_P (decl)
331 && (TREE_STATIC (decl)
332 || DECL_THREAD_LOCAL_P (decl))
333 && DECL_RTL_SET_P (decl)
334 && MEM_P (DECL_RTL (decl)))
336 rtx newx;
338 offset += MEM_OFFSET (x);
340 newx = DECL_RTL (decl);
342 if (MEM_P (newx))
344 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
345 poly_int64 n_offset, o_offset;
347 /* Avoid creating a new MEM needlessly if we already had
348 the same address. We do this if there's no OFFSET and the
349 old address X is identical to NEWX, or if X is of the
350 form (plus NEWX OFFSET), or the NEWX is of the form
351 (plus Y (const_int Z)) and X is that with the offset
352 added: (plus Y (const_int Z+OFFSET)). */
353 n = strip_offset (n, &n_offset);
354 o = strip_offset (o, &o_offset);
355 if (!(known_eq (o_offset, n_offset + offset)
356 && rtx_equal_p (o, n)))
357 x = adjust_address_nv (newx, mode, offset);
359 else if (GET_MODE (x) == GET_MODE (newx)
360 && known_eq (offset, 0))
361 x = newx;
365 return x;
368 /* Make a unary operation by first seeing if it folds and otherwise making
369 the specified operation. */
372 simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
373 machine_mode op_mode)
375 rtx tem;
377 /* If this simplifies, use it. */
378 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
379 return tem;
381 return gen_rtx_fmt_e (code, mode, op);
384 /* Likewise for ternary operations. */
387 simplify_gen_ternary (enum rtx_code code, machine_mode mode,
388 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
390 rtx tem;
392 /* If this simplifies, use it. */
393 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
394 op0, op1, op2)) != 0)
395 return tem;
397 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
400 /* Likewise, for relational operations.
401 CMP_MODE specifies the mode in which the comparison is done. */
404 simplify_gen_relational (enum rtx_code code, machine_mode mode,
405 machine_mode cmp_mode, rtx op0, rtx op1)
407 rtx tem;
409 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
410 op0, op1)) != 0)
411 return tem;
413 return gen_rtx_fmt_ee (code, mode, op0, op1);
416 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
417 and simplify the result. If FN is non-NULL, call this callback on each
418 X; if it returns non-NULL, replace X with its return value and simplify the
419 result. */
422 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
423 rtx (*fn) (rtx, const_rtx, void *), void *data)
425 enum rtx_code code = GET_CODE (x);
426 machine_mode mode = GET_MODE (x);
427 machine_mode op_mode;
428 const char *fmt;
429 rtx op0, op1, op2, newx, op;
430 rtvec vec, newvec;
431 int i, j;
433 if (__builtin_expect (fn != NULL, 0))
435 newx = fn (x, old_rtx, data);
436 if (newx)
437 return newx;
439 else if (rtx_equal_p (x, old_rtx))
440 return copy_rtx ((rtx) data);
442 switch (GET_RTX_CLASS (code))
444 case RTX_UNARY:
445 op0 = XEXP (x, 0);
446 op_mode = GET_MODE (op0);
447 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
448 if (op0 == XEXP (x, 0))
449 return x;
450 return simplify_gen_unary (code, mode, op0, op_mode);
452 case RTX_BIN_ARITH:
453 case RTX_COMM_ARITH:
454 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
455 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
456 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
457 return x;
458 return simplify_gen_binary (code, mode, op0, op1);
460 case RTX_COMPARE:
461 case RTX_COMM_COMPARE:
462 op0 = XEXP (x, 0);
463 op1 = XEXP (x, 1);
464 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
465 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
466 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
467 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
468 return x;
469 return simplify_gen_relational (code, mode, op_mode, op0, op1);
471 case RTX_TERNARY:
472 case RTX_BITFIELD_OPS:
473 op0 = XEXP (x, 0);
474 op_mode = GET_MODE (op0);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
477 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
478 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
479 return x;
480 if (op_mode == VOIDmode)
481 op_mode = GET_MODE (op0);
482 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
484 case RTX_EXTRA:
485 if (code == SUBREG)
487 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
488 if (op0 == SUBREG_REG (x))
489 return x;
490 op0 = simplify_gen_subreg (GET_MODE (x), op0,
491 GET_MODE (SUBREG_REG (x)),
492 SUBREG_BYTE (x));
493 return op0 ? op0 : x;
495 break;
497 case RTX_OBJ:
498 if (code == MEM)
500 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
501 if (op0 == XEXP (x, 0))
502 return x;
503 return replace_equiv_address_nv (x, op0);
505 else if (code == LO_SUM)
507 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
508 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
510 /* (lo_sum (high x) y) -> y where x and y have the same base. */
511 if (GET_CODE (op0) == HIGH)
513 rtx base0, base1, offset0, offset1;
514 split_const (XEXP (op0, 0), &base0, &offset0);
515 split_const (op1, &base1, &offset1);
516 if (rtx_equal_p (base0, base1))
517 return op1;
520 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
521 return x;
522 return gen_rtx_LO_SUM (mode, op0, op1);
524 break;
526 default:
527 break;
530 newx = x;
531 fmt = GET_RTX_FORMAT (code);
532 for (i = 0; fmt[i]; i++)
533 switch (fmt[i])
535 case 'E':
536 vec = XVEC (x, i);
537 newvec = XVEC (newx, i);
538 for (j = 0; j < GET_NUM_ELEM (vec); j++)
540 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
541 old_rtx, fn, data);
542 if (op != RTVEC_ELT (vec, j))
544 if (newvec == vec)
546 newvec = shallow_copy_rtvec (vec);
547 if (x == newx)
548 newx = shallow_copy_rtx (x);
549 XVEC (newx, i) = newvec;
551 RTVEC_ELT (newvec, j) = op;
554 break;
556 case 'e':
557 if (XEXP (x, i))
559 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
560 if (op != XEXP (x, i))
562 if (x == newx)
563 newx = shallow_copy_rtx (x);
564 XEXP (newx, i) = op;
567 break;
569 return newx;
572 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
573 resulting RTX. Return a new RTX which is as simplified as possible. */
576 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
578 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
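/* Illustrative use: replacing (reg R) with (const_int 2) in
   (plus:SI (reg R) (const_int 1)) via simplify_replace_rtx yields
   (const_int 3), because the substituted expression is re-simplified.  */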
581 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
582 Only handle cases where the truncated value is inherently an rvalue.
584 RTL provides two ways of truncating a value:
586 1. a lowpart subreg. This form is only a truncation when both
587 the outer and inner modes (here MODE and OP_MODE respectively)
588 are scalar integers, and only then when the subreg is used as
589 an rvalue.
591 It is only valid to form such truncating subregs if the
592 truncation requires no action by the target. The onus for
593 proving this is on the creator of the subreg -- e.g. the
594 caller to simplify_subreg or simplify_gen_subreg -- and typically
595 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
597 2. a TRUNCATE. This form handles both scalar and compound integers.
599 The first form is preferred where valid. However, the TRUNCATE
600 handling in simplify_unary_operation turns the second form into the
601 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
602 so it is generally safe to form rvalue truncations using:
604 simplify_gen_unary (TRUNCATE, ...)
606 and leave simplify_unary_operation to work out which representation
607 should be used.
609 Because of the proof requirements on (1), simplify_truncation must
610 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
611 regardless of whether the outer truncation came from a SUBREG or a
612 TRUNCATE. For example, if the caller has proven that an SImode
613 truncation of:
615 (and:DI X Y)
617 is a no-op and can be represented as a subreg, it does not follow
618 that SImode truncations of X and Y are also no-ops. On a target
619 like 64-bit MIPS that requires SImode values to be stored in
620 sign-extended form, an SImode truncation of:
622 (and:DI (reg:DI X) (const_int 63))
624 is trivially a no-op because only the lower 6 bits can be set.
625 However, X is still an arbitrary 64-bit number and so we cannot
626 assume that truncating it too is a no-op. */
628 static rtx
629 simplify_truncation (machine_mode mode, rtx op,
630 machine_mode op_mode)
632 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
633 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
634 scalar_int_mode int_mode, int_op_mode, subreg_mode;
636 gcc_assert (precision <= op_precision);
638 /* Optimize truncations of zero and sign extended values. */
639 if (GET_CODE (op) == ZERO_EXTEND
640 || GET_CODE (op) == SIGN_EXTEND)
642 /* There are three possibilities. If MODE is the same as the
643 origmode, we can omit both the extension and the subreg.
644 If MODE is not larger than the origmode, we can apply the
645 truncation without the extension. Finally, if the outermode
646 is larger than the origmode, we can just extend to the appropriate
647 mode. */
648 machine_mode origmode = GET_MODE (XEXP (op, 0));
649 if (mode == origmode)
650 return XEXP (op, 0);
651 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
652 return simplify_gen_unary (TRUNCATE, mode,
653 XEXP (op, 0), origmode);
654 else
655 return simplify_gen_unary (GET_CODE (op), mode,
656 XEXP (op, 0), origmode);
659 /* If the machine can perform operations in the truncated mode, distribute
660 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
661 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
662 if (1
663 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
664 && (GET_CODE (op) == PLUS
665 || GET_CODE (op) == MINUS
666 || GET_CODE (op) == MULT))
668 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
669 if (op0)
671 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
672 if (op1)
673 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
677 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
678 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
679 the outer subreg is effectively a truncation to the original mode. */
680 if ((GET_CODE (op) == LSHIFTRT
681 || GET_CODE (op) == ASHIFTRT)
682 /* Ensure that OP_MODE is at least twice as wide as MODE
683 to avoid the possibility that an outer LSHIFTRT shifts by more
684 than the sign extension's sign_bit_copies and introduces zeros
685 into the high bits of the result. */
686 && 2 * precision <= op_precision
687 && CONST_INT_P (XEXP (op, 1))
688 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
689 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
690 && UINTVAL (XEXP (op, 1)) < precision)
691 return simplify_gen_binary (ASHIFTRT, mode,
692 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
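/* For example, (truncate:QI (ashiftrt:SI (sign_extend:SI (x:QI))
   (const_int 3))) becomes (ashiftrt:QI (x:QI) (const_int 3)) here.  */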
694 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
695 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
696 the outer subreg is effectively a truncation to the original mode. */
697 if ((GET_CODE (op) == LSHIFTRT
698 || GET_CODE (op) == ASHIFTRT)
699 && CONST_INT_P (XEXP (op, 1))
700 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
701 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
702 && UINTVAL (XEXP (op, 1)) < precision)
703 return simplify_gen_binary (LSHIFTRT, mode,
704 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
706 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
707 (ashift:QI (x:QI) C), where C is a suitable small constant and
708 the outer subreg is effectively a truncation to the original mode. */
709 if (GET_CODE (op) == ASHIFT
710 && CONST_INT_P (XEXP (op, 1))
711 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
712 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
713 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
714 && UINTVAL (XEXP (op, 1)) < precision)
715 return simplify_gen_binary (ASHIFT, mode,
716 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
718 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
719 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
720 and C2. */
721 if (GET_CODE (op) == AND
722 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
723 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
724 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
725 && CONST_INT_P (XEXP (op, 1)))
727 rtx op0 = (XEXP (XEXP (op, 0), 0));
728 rtx shift_op = XEXP (XEXP (op, 0), 1);
729 rtx mask_op = XEXP (op, 1);
730 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
731 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
733 if (shift < precision
734 /* If doing this transform works for an X with all bits set,
735 it works for any X. */
736 && ((GET_MODE_MASK (mode) >> shift) & mask)
737 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
738 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
739 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
741 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
742 return simplify_gen_binary (AND, mode, op0, mask_op);
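/* For example, assuming 8-bit QImode and 32-bit SImode,
   (truncate:QI (and:SI (lshiftrt:SI (x:SI) (const_int 2)) (const_int 60)))
   becomes (and:QI (lshiftrt:QI (truncate:QI (x:SI)) (const_int 2))
   (const_int 60)), since the bits of the mask that survive the shift are
   the same whether the AND is done in SImode or QImode.  */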
746 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
747 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
748 changing len. */
749 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
750 && REG_P (XEXP (op, 0))
751 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
752 && CONST_INT_P (XEXP (op, 1))
753 && CONST_INT_P (XEXP (op, 2)))
755 rtx op0 = XEXP (op, 0);
756 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
757 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
758 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
760 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
761 if (op0)
763 pos -= op_precision - precision;
764 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
765 XEXP (op, 1), GEN_INT (pos));
768 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
772 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
773 XEXP (op, 1), XEXP (op, 2));
777 /* Recognize a word extraction from a multi-word subreg. */
778 if ((GET_CODE (op) == LSHIFTRT
779 || GET_CODE (op) == ASHIFTRT)
780 && SCALAR_INT_MODE_P (mode)
781 && SCALAR_INT_MODE_P (op_mode)
782 && precision >= BITS_PER_WORD
783 && 2 * precision <= op_precision
784 && CONST_INT_P (XEXP (op, 1))
785 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
786 && UINTVAL (XEXP (op, 1)) < op_precision)
788 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
789 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
790 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
791 (WORDS_BIG_ENDIAN
792 ? byte - shifted_bytes
793 : byte + shifted_bytes));
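/* For example, on a 32-bit little-endian target (BITS_PER_WORD == 32),
   (truncate:SI (lshiftrt:DI (x:DI) (const_int 32))) becomes
   (subreg:SI (x:DI) 4), i.e. the high word of X.  */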
796 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
797 and try replacing the TRUNCATE and shift with it. Don't do this
798 if the MEM has a mode-dependent address. */
799 if ((GET_CODE (op) == LSHIFTRT
800 || GET_CODE (op) == ASHIFTRT)
801 && is_a <scalar_int_mode> (mode, &int_mode)
802 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
803 && MEM_P (XEXP (op, 0))
804 && CONST_INT_P (XEXP (op, 1))
805 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
806 && INTVAL (XEXP (op, 1)) > 0
807 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
808 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
809 MEM_ADDR_SPACE (XEXP (op, 0)))
810 && ! MEM_VOLATILE_P (XEXP (op, 0))
811 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
812 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
814 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
815 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
816 return adjust_address_nv (XEXP (op, 0), int_mode,
817 (WORDS_BIG_ENDIAN
818 ? byte - shifted_bytes
819 : byte + shifted_bytes));
822 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
823 (OP:SI foo:SI) if OP is NEG or ABS. */
824 if ((GET_CODE (op) == ABS
825 || GET_CODE (op) == NEG)
826 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
827 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
828 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
829 return simplify_gen_unary (GET_CODE (op), mode,
830 XEXP (XEXP (op, 0), 0), mode);
832 /* (truncate:A (subreg:B (truncate:C X) 0)) is
833 (truncate:A X). */
834 if (GET_CODE (op) == SUBREG
835 && is_a <scalar_int_mode> (mode, &int_mode)
836 && SCALAR_INT_MODE_P (op_mode)
837 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
838 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
839 && subreg_lowpart_p (op))
841 rtx inner = XEXP (SUBREG_REG (op), 0);
842 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
843 return simplify_gen_unary (TRUNCATE, int_mode, inner,
844 GET_MODE (inner));
845 else
846 /* If subreg above is paradoxical and C is narrower
847 than A, return (subreg:A (truncate:C X) 0). */
848 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
851 /* (truncate:A (truncate:B X)) is (truncate:A X). */
852 if (GET_CODE (op) == TRUNCATE)
853 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
854 GET_MODE (XEXP (op, 0)));
856 /* (truncate:A (ior X C)) is (const_int -1) if C truncated to mode A
857 is already all ones. */
858 if (GET_CODE (op) == IOR
859 && SCALAR_INT_MODE_P (mode)
860 && SCALAR_INT_MODE_P (op_mode)
861 && CONST_INT_P (XEXP (op, 1))
862 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
863 return constm1_rtx;
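/* For example, (truncate:QI (ior:SI (x:SI) (const_int 255))) is
   (const_int -1), because 255 is already all ones in QImode.  */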
865 return NULL_RTX;
868 /* Try to simplify a unary operation CODE whose output mode is to be
869 MODE with input operand OP whose mode was originally OP_MODE.
870 Return zero if no simplification can be made. */
872 simplify_unary_operation (enum rtx_code code, machine_mode mode,
873 rtx op, machine_mode op_mode)
875 rtx trueop, tem;
877 trueop = avoid_constant_pool_reference (op);
879 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
880 if (tem)
881 return tem;
883 return simplify_unary_operation_1 (code, mode, op);
886 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
887 to be exact. */
889 static bool
890 exact_int_to_float_conversion_p (const_rtx op)
892 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
893 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
894 /* Constants shouldn't reach here. */
895 gcc_assert (op0_mode != VOIDmode);
896 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
897 int in_bits = in_prec;
898 if (HWI_COMPUTABLE_MODE_P (op0_mode))
900 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
901 if (GET_CODE (op) == FLOAT)
902 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
903 else if (GET_CODE (op) == UNSIGNED_FLOAT)
904 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
905 else
906 gcc_unreachable ();
907 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
909 return in_bits <= out_bits;
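/* For example, assuming the usual IEEE formats, (float:DF (x:SI)) is
   always exact because DFmode's 53-bit significand can represent every
   32-bit integer, whereas (float:SF (x:SI)) is only known to be exact
   when nonzero_bits/num_sign_bit_copies show that X needs at most 24
   significant bits.  */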
912 /* Perform some simplifications we can do even if the operands
913 aren't constant. */
914 static rtx
915 simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
917 enum rtx_code reversed;
918 rtx temp, elt, base, step;
919 scalar_int_mode inner, int_mode, op_mode, op0_mode;
921 switch (code)
923 case NOT:
924 /* (not (not X)) == X. */
925 if (GET_CODE (op) == NOT)
926 return XEXP (op, 0);
928 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
929 comparison is all ones. */
930 if (COMPARISON_P (op)
931 && (mode == BImode || STORE_FLAG_VALUE == -1)
932 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
933 return simplify_gen_relational (reversed, mode, VOIDmode,
934 XEXP (op, 0), XEXP (op, 1));
936 /* (not (plus X -1)) can become (neg X). */
937 if (GET_CODE (op) == PLUS
938 && XEXP (op, 1) == constm1_rtx)
939 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
941 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
942 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
943 and MODE_VECTOR_INT. */
944 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
945 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
946 CONSTM1_RTX (mode));
948 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
949 if (GET_CODE (op) == XOR
950 && CONST_INT_P (XEXP (op, 1))
951 && (temp = simplify_unary_operation (NOT, mode,
952 XEXP (op, 1), mode)) != 0)
953 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
955 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
956 if (GET_CODE (op) == PLUS
957 && CONST_INT_P (XEXP (op, 1))
958 && mode_signbit_p (mode, XEXP (op, 1))
959 && (temp = simplify_unary_operation (NOT, mode,
960 XEXP (op, 1), mode)) != 0)
961 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
964 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
965 operands other than 1, but that is not valid. We could do a
966 similar simplification for (not (lshiftrt C X)) where C is
967 just the sign bit, but this doesn't seem common enough to
968 bother with. */
969 if (GET_CODE (op) == ASHIFT
970 && XEXP (op, 0) == const1_rtx)
972 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
973 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
976 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
977 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
978 so we can perform the above simplification. */
979 if (STORE_FLAG_VALUE == -1
980 && is_a <scalar_int_mode> (mode, &int_mode)
981 && GET_CODE (op) == ASHIFTRT
982 && CONST_INT_P (XEXP (op, 1))
983 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
984 return simplify_gen_relational (GE, int_mode, VOIDmode,
985 XEXP (op, 0), const0_rtx);
988 if (partial_subreg_p (op)
989 && subreg_lowpart_p (op)
990 && GET_CODE (SUBREG_REG (op)) == ASHIFT
991 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
993 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
994 rtx x;
996 x = gen_rtx_ROTATE (inner_mode,
997 simplify_gen_unary (NOT, inner_mode, const1_rtx,
998 inner_mode),
999 XEXP (SUBREG_REG (op), 1));
1000 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1001 if (temp)
1002 return temp;
1005 /* Apply De Morgan's laws to reduce number of patterns for machines
1006 with negating logical insns (and-not, nand, etc.). If result has
1007 only one NOT, put it first, since that is how the patterns are
1008 coded. */
1009 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1011 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1012 machine_mode op_mode;
1014 op_mode = GET_MODE (in1);
1015 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1017 op_mode = GET_MODE (in2);
1018 if (op_mode == VOIDmode)
1019 op_mode = mode;
1020 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1022 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1023 std::swap (in1, in2);
1025 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1026 mode, in1, in2);
1029 /* (not (bswap x)) -> (bswap (not x)). */
1030 if (GET_CODE (op) == BSWAP)
1032 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1033 return simplify_gen_unary (BSWAP, mode, x, mode);
1035 break;
1037 case NEG:
1038 /* (neg (neg X)) == X. */
1039 if (GET_CODE (op) == NEG)
1040 return XEXP (op, 0);
1042 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1043 If comparison is not reversible use
1044 x ? y : (neg y). */
1045 if (GET_CODE (op) == IF_THEN_ELSE)
1047 rtx cond = XEXP (op, 0);
1048 rtx true_rtx = XEXP (op, 1);
1049 rtx false_rtx = XEXP (op, 2);
1051 if ((GET_CODE (true_rtx) == NEG
1052 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1053 || (GET_CODE (false_rtx) == NEG
1054 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1056 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1057 temp = reversed_comparison (cond, mode);
1058 else
1060 temp = cond;
1061 std::swap (true_rtx, false_rtx);
1063 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1064 mode, temp, true_rtx, false_rtx);
1068 /* (neg (plus X 1)) can become (not X). */
1069 if (GET_CODE (op) == PLUS
1070 && XEXP (op, 1) == const1_rtx)
1071 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1073 /* Similarly, (neg (not X)) is (plus X 1). */
1074 if (GET_CODE (op) == NOT)
1075 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1076 CONST1_RTX (mode));
1078 /* (neg (minus X Y)) can become (minus Y X). This transformation
1079 isn't safe for modes with signed zeros, since if X and Y are
1080 both +0, (minus Y X) is the same as (minus X Y). If the
1081 rounding mode is towards +infinity (or -infinity) then the two
1082 expressions will be rounded differently. */
1083 if (GET_CODE (op) == MINUS
1084 && !HONOR_SIGNED_ZEROS (mode)
1085 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1086 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1088 if (GET_CODE (op) == PLUS
1089 && !HONOR_SIGNED_ZEROS (mode)
1090 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1092 /* (neg (plus A C)) is simplified to (minus -C A). */
1093 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1094 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1096 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1097 if (temp)
1098 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1101 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1102 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1103 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1106 /* (neg (mult A B)) becomes (mult A (neg B)).
1107 This works even for floating-point values. */
1108 if (GET_CODE (op) == MULT
1109 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1111 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1112 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1115 /* NEG commutes with ASHIFT since it is multiplication. Only do
1116 this if we can then eliminate the NEG (e.g., if the operand
1117 is a constant). */
1118 if (GET_CODE (op) == ASHIFT)
1120 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1121 if (temp)
1122 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1125 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1126 C is equal to the width of MODE minus 1. */
1127 if (GET_CODE (op) == ASHIFTRT
1128 && CONST_INT_P (XEXP (op, 1))
1129 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1130 return simplify_gen_binary (LSHIFTRT, mode,
1131 XEXP (op, 0), XEXP (op, 1));
1133 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1134 C is equal to the width of MODE minus 1. */
1135 if (GET_CODE (op) == LSHIFTRT
1136 && CONST_INT_P (XEXP (op, 1))
1137 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1138 return simplify_gen_binary (ASHIFTRT, mode,
1139 XEXP (op, 0), XEXP (op, 1));
1141 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1142 if (GET_CODE (op) == XOR
1143 && XEXP (op, 1) == const1_rtx
1144 && nonzero_bits (XEXP (op, 0), mode) == 1)
1145 return plus_constant (mode, XEXP (op, 0), -1);
1147 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1148 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1149 if (GET_CODE (op) == LT
1150 && XEXP (op, 1) == const0_rtx
1151 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1153 int_mode = as_a <scalar_int_mode> (mode);
1154 int isize = GET_MODE_PRECISION (inner);
1155 if (STORE_FLAG_VALUE == 1)
1157 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1158 gen_int_shift_amount (inner,
1159 isize - 1));
1160 if (int_mode == inner)
1161 return temp;
1162 if (GET_MODE_PRECISION (int_mode) > isize)
1163 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1164 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1166 else if (STORE_FLAG_VALUE == -1)
1168 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1169 gen_int_shift_amount (inner,
1170 isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1179 if (vec_series_p (op, &base, &step))
1181 /* Only create a new series if we can simplify both parts. In other
1182 cases this isn't really a simplification, and it's not necessarily
1183 a win to replace a vector operation with a scalar operation. */
1184 scalar_mode inner_mode = GET_MODE_INNER (mode);
1185 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1186 if (base)
1188 step = simplify_unary_operation (NEG, inner_mode,
1189 step, inner_mode);
1190 if (step)
1191 return gen_vec_series (mode, base, step);
1194 break;
1196 case TRUNCATE:
1197 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1198 with the umulXi3_highpart patterns. */
1199 if (GET_CODE (op) == LSHIFTRT
1200 && GET_CODE (XEXP (op, 0)) == MULT)
1201 break;
1203 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1205 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1207 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1208 if (temp)
1209 return temp;
1211 /* We can't handle truncation to a partial integer mode here
1212 because we don't know the real bitsize of the partial
1213 integer mode. */
1214 break;
1217 if (GET_MODE (op) != VOIDmode)
1219 temp = simplify_truncation (mode, op, GET_MODE (op));
1220 if (temp)
1221 return temp;
1224 /* If we know that the value is already truncated, we can
1225 replace the TRUNCATE with a SUBREG. */
1226 if (known_eq (GET_MODE_NUNITS (mode), 1)
1227 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1228 || truncated_to_mode (mode, op)))
1230 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1231 if (temp)
1232 return temp;
1235 /* A truncate of a comparison can be replaced with a subreg if
1236 STORE_FLAG_VALUE permits. This is like the previous test,
1237 but it works even if the comparison is done in a mode larger
1238 than HOST_BITS_PER_WIDE_INT. */
1239 if (HWI_COMPUTABLE_MODE_P (mode)
1240 && COMPARISON_P (op)
1241 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1243 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1244 if (temp)
1245 return temp;
1248 /* A truncate of a memory is just loading the low part of the memory
1249 if we are not changing the meaning of the address. */
1250 if (GET_CODE (op) == MEM
1251 && !VECTOR_MODE_P (mode)
1252 && !MEM_VOLATILE_P (op)
1253 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1255 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1256 if (temp)
1257 return temp;
1260 break;
1262 case FLOAT_TRUNCATE:
1263 if (DECIMAL_FLOAT_MODE_P (mode))
1264 break;
1266 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1267 if (GET_CODE (op) == FLOAT_EXTEND
1268 && GET_MODE (XEXP (op, 0)) == mode)
1269 return XEXP (op, 0);
1271 /* (float_truncate:SF (float_truncate:DF foo:XF))
1272 = (float_truncate:SF foo:XF).
1273 This may eliminate double rounding, so it is unsafe.
1275 (float_truncate:SF (float_extend:XF foo:DF))
1276 = (float_truncate:SF foo:DF).
1278 (float_truncate:DF (float_extend:XF foo:SF))
1279 = (float_extend:DF foo:SF). */
1280 if ((GET_CODE (op) == FLOAT_TRUNCATE
1281 && flag_unsafe_math_optimizations)
1282 || GET_CODE (op) == FLOAT_EXTEND)
1283 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1284 > GET_MODE_UNIT_SIZE (mode)
1285 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1286 mode,
1287 XEXP (op, 0), mode);
1289 /* (float_truncate (float x)) is (float x) */
1290 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1291 && (flag_unsafe_math_optimizations
1292 || exact_int_to_float_conversion_p (op)))
1293 return simplify_gen_unary (GET_CODE (op), mode,
1294 XEXP (op, 0),
1295 GET_MODE (XEXP (op, 0)));
1297 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1298 (OP:SF foo:SF) if OP is NEG or ABS. */
1299 if ((GET_CODE (op) == ABS
1300 || GET_CODE (op) == NEG)
1301 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1302 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1303 return simplify_gen_unary (GET_CODE (op), mode,
1304 XEXP (XEXP (op, 0), 0), mode);
1306 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1307 is (float_truncate:SF x). */
1308 if (GET_CODE (op) == SUBREG
1309 && subreg_lowpart_p (op)
1310 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1311 return SUBREG_REG (op);
1312 break;
1314 case FLOAT_EXTEND:
1315 if (DECIMAL_FLOAT_MODE_P (mode))
1316 break;
1318 /* (float_extend (float_extend x)) is (float_extend x)
1320 (float_extend (float x)) is (float x) assuming that double
1321 rounding can't happen. */
1323 if (GET_CODE (op) == FLOAT_EXTEND
1324 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1325 && exact_int_to_float_conversion_p (op)))
1326 return simplify_gen_unary (GET_CODE (op), mode,
1327 XEXP (op, 0),
1328 GET_MODE (XEXP (op, 0)));
1330 break;
1332 case ABS:
1333 /* (abs (neg <foo>)) -> (abs <foo>) */
1334 if (GET_CODE (op) == NEG)
1335 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1336 GET_MODE (XEXP (op, 0)));
1338 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1339 do nothing. */
1340 if (GET_MODE (op) == VOIDmode)
1341 break;
1343 /* If operand is something known to be positive, ignore the ABS. */
1344 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1345 || val_signbit_known_clear_p (GET_MODE (op),
1346 nonzero_bits (op, GET_MODE (op))))
1347 return op;
1349 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1350 if (is_a <scalar_int_mode> (mode, &int_mode)
1351 && (num_sign_bit_copies (op, int_mode)
1352 == GET_MODE_PRECISION (int_mode)))
1353 return gen_rtx_NEG (int_mode, op);
1355 break;
1357 case FFS:
1358 /* (ffs (*_extend <X>)) = (ffs <X>) */
1359 if (GET_CODE (op) == SIGN_EXTEND
1360 || GET_CODE (op) == ZERO_EXTEND)
1361 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1362 GET_MODE (XEXP (op, 0)));
1363 break;
1365 case POPCOUNT:
1366 switch (GET_CODE (op))
1368 case BSWAP:
1369 case ZERO_EXTEND:
1370 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1371 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1372 GET_MODE (XEXP (op, 0)));
1374 case ROTATE:
1375 case ROTATERT:
1376 /* Rotations don't affect popcount. */
1377 if (!side_effects_p (XEXP (op, 1)))
1378 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1379 GET_MODE (XEXP (op, 0)));
1380 break;
1382 default:
1383 break;
1385 break;
1387 case PARITY:
1388 switch (GET_CODE (op))
1390 case NOT:
1391 case BSWAP:
1392 case ZERO_EXTEND:
1393 case SIGN_EXTEND:
1394 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1397 case ROTATE:
1398 case ROTATERT:
1399 /* Rotations don't affect parity. */
1400 if (!side_effects_p (XEXP (op, 1)))
1401 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1402 GET_MODE (XEXP (op, 0)));
1403 break;
1405 default:
1406 break;
1408 break;
1410 case BSWAP:
1411 /* (bswap (bswap x)) -> x. */
1412 if (GET_CODE (op) == BSWAP)
1413 return XEXP (op, 0);
1414 break;
1416 case FLOAT:
1417 /* (float (sign_extend <X>)) = (float <X>). */
1418 if (GET_CODE (op) == SIGN_EXTEND)
1419 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1420 GET_MODE (XEXP (op, 0)));
1421 break;
1423 case SIGN_EXTEND:
1424 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1425 becomes just the MINUS if its mode is MODE. This allows
1426 folding switch statements on machines using casesi (such as
1427 the VAX). */
1428 if (GET_CODE (op) == TRUNCATE
1429 && GET_MODE (XEXP (op, 0)) == mode
1430 && GET_CODE (XEXP (op, 0)) == MINUS
1431 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1432 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1433 return XEXP (op, 0);
1435 /* Extending a widening multiplication should be canonicalized to
1436 a wider widening multiplication. */
1437 if (GET_CODE (op) == MULT)
1439 rtx lhs = XEXP (op, 0);
1440 rtx rhs = XEXP (op, 1);
1441 enum rtx_code lcode = GET_CODE (lhs);
1442 enum rtx_code rcode = GET_CODE (rhs);
1444 /* Widening multiplies usually extend both operands, but sometimes
1445 they use a shift to extract a portion of a register. */
1446 if ((lcode == SIGN_EXTEND
1447 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1448 && (rcode == SIGN_EXTEND
1449 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1451 machine_mode lmode = GET_MODE (lhs);
1452 machine_mode rmode = GET_MODE (rhs);
1453 int bits;
1455 if (lcode == ASHIFTRT)
1456 /* Number of bits not shifted off the end. */
1457 bits = (GET_MODE_UNIT_PRECISION (lmode)
1458 - INTVAL (XEXP (lhs, 1)));
1459 else /* lcode == SIGN_EXTEND */
1460 /* Size of inner mode. */
1461 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1463 if (rcode == ASHIFTRT)
1464 bits += (GET_MODE_UNIT_PRECISION (rmode)
1465 - INTVAL (XEXP (rhs, 1)));
1466 else /* rcode == SIGN_EXTEND */
1467 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1469 /* We can only widen multiplies if the result is mathematically
1470 equivalent. I.e. if overflow was impossible. */
1471 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1472 return simplify_gen_binary
1473 (MULT, mode,
1474 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1475 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
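/* For example, (sign_extend:DI (mult:SI (sign_extend:SI (a:HI))
   (sign_extend:SI (b:HI)))) becomes (mult:DI (sign_extend:DI (a:HI))
   (sign_extend:DI (b:HI))): the product of two 16-bit values needs at
   most 32 bits, so widening the multiplication itself loses nothing.  */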
1479 /* Check for a sign extension of a subreg of a promoted
1480 variable, where the promotion is sign-extended, and the
1481 target mode is the same as the variable's promotion. */
1482 if (GET_CODE (op) == SUBREG
1483 && SUBREG_PROMOTED_VAR_P (op)
1484 && SUBREG_PROMOTED_SIGNED_P (op)
1485 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1487 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1488 if (temp)
1489 return temp;
1492 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1493 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1494 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1496 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1497 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1498 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1499 GET_MODE (XEXP (op, 0)));
1502 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1503 is (sign_extend:M (subreg:O <X>)) if there is mode with
1504 GET_MODE_BITSIZE (N) - I bits.
1505 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1506 is similarly (zero_extend:M (subreg:O <X>)). */
1507 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1508 && GET_CODE (XEXP (op, 0)) == ASHIFT
1509 && is_a <scalar_int_mode> (mode, &int_mode)
1510 && CONST_INT_P (XEXP (op, 1))
1511 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1512 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1513 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1515 scalar_int_mode tmode;
1516 gcc_assert (GET_MODE_BITSIZE (int_mode)
1517 > GET_MODE_BITSIZE (op_mode));
1518 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1519 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1521 rtx inner =
1522 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1523 if (inner)
1524 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1525 ? SIGN_EXTEND : ZERO_EXTEND,
1526 int_mode, inner, tmode);
1530 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1531 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1532 if (GET_CODE (op) == LSHIFTRT
1533 && CONST_INT_P (XEXP (op, 1))
1534 && XEXP (op, 1) != const0_rtx)
1535 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1537 #if defined(POINTERS_EXTEND_UNSIGNED)
1538 /* As we do not know which address space the pointer is referring to,
1539 we can do this only if the target does not support different pointer
1540 or address modes depending on the address space. */
1541 if (target_default_pointer_address_modes_p ()
1542 && ! POINTERS_EXTEND_UNSIGNED
1543 && mode == Pmode && GET_MODE (op) == ptr_mode
1544 && (CONSTANT_P (op)
1545 || (GET_CODE (op) == SUBREG
1546 && REG_P (SUBREG_REG (op))
1547 && REG_POINTER (SUBREG_REG (op))
1548 && GET_MODE (SUBREG_REG (op)) == Pmode))
1549 && !targetm.have_ptr_extend ())
1551 temp
1552 = convert_memory_address_addr_space_1 (Pmode, op,
1553 ADDR_SPACE_GENERIC, false,
1554 true);
1555 if (temp)
1556 return temp;
1558 #endif
1559 break;
1561 case ZERO_EXTEND:
1562 /* Check for a zero extension of a subreg of a promoted
1563 variable, where the promotion is zero-extended, and the
1564 target mode is the same as the variable's promotion. */
1565 if (GET_CODE (op) == SUBREG
1566 && SUBREG_PROMOTED_VAR_P (op)
1567 && SUBREG_PROMOTED_UNSIGNED_P (op)
1568 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1570 temp = rtl_hooks.gen_lowpart_no_emit (mode, SUBREG_REG (op));
1571 if (temp)
1572 return temp;
1575 /* Extending a widening multiplication should be canonicalized to
1576 a wider widening multiplication. */
1577 if (GET_CODE (op) == MULT)
1579 rtx lhs = XEXP (op, 0);
1580 rtx rhs = XEXP (op, 1);
1581 enum rtx_code lcode = GET_CODE (lhs);
1582 enum rtx_code rcode = GET_CODE (rhs);
1584 /* Widening multiplies usually extend both operands, but sometimes
1585 they use a shift to extract a portion of a register. */
1586 if ((lcode == ZERO_EXTEND
1587 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1588 && (rcode == ZERO_EXTEND
1589 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1591 machine_mode lmode = GET_MODE (lhs);
1592 machine_mode rmode = GET_MODE (rhs);
1593 int bits;
1595 if (lcode == LSHIFTRT)
1596 /* Number of bits not shifted off the end. */
1597 bits = (GET_MODE_UNIT_PRECISION (lmode)
1598 - INTVAL (XEXP (lhs, 1)));
1599 else /* lcode == ZERO_EXTEND */
1600 /* Size of inner mode. */
1601 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1603 if (rcode == LSHIFTRT)
1604 bits += (GET_MODE_UNIT_PRECISION (rmode)
1605 - INTVAL (XEXP (rhs, 1)));
1606 else /* rcode == ZERO_EXTEND */
1607 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1609 /* We can only widen multiplies if the result is mathematically
1610 equivalent. I.e. if overflow was impossible. */
1611 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1612 return simplify_gen_binary
1613 (MULT, mode,
1614 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1615 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1619 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1620 if (GET_CODE (op) == ZERO_EXTEND)
1621 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1622 GET_MODE (XEXP (op, 0)));
1624 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1625 is (zero_extend:M (subreg:O <X>)) if there is mode with
1626 GET_MODE_PRECISION (N) - I bits. */
1627 if (GET_CODE (op) == LSHIFTRT
1628 && GET_CODE (XEXP (op, 0)) == ASHIFT
1629 && is_a <scalar_int_mode> (mode, &int_mode)
1630 && CONST_INT_P (XEXP (op, 1))
1631 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1632 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1633 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1635 scalar_int_mode tmode;
1636 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1637 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1639 rtx inner =
1640 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1641 if (inner)
1642 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1643 inner, tmode);
1647 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1648 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1649 of mode N. E.g.
1650 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1651 (and:SI (reg:SI) (const_int 63)). */
1652 if (partial_subreg_p (op)
1653 && is_a <scalar_int_mode> (mode, &int_mode)
1654 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1655 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1656 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1657 && subreg_lowpart_p (op)
1658 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1659 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1661 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1662 return SUBREG_REG (op);
1663 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1664 op0_mode);
1667 #if defined(POINTERS_EXTEND_UNSIGNED)
1668 /* As we do not know which address space the pointer is referring to,
1669 we can do this only if the target does not support different pointer
1670 or address modes depending on the address space. */
1671 if (target_default_pointer_address_modes_p ()
1672 && POINTERS_EXTEND_UNSIGNED > 0
1673 && mode == Pmode && GET_MODE (op) == ptr_mode
1674 && (CONSTANT_P (op)
1675 || (GET_CODE (op) == SUBREG
1676 && REG_P (SUBREG_REG (op))
1677 && REG_POINTER (SUBREG_REG (op))
1678 && GET_MODE (SUBREG_REG (op)) == Pmode))
1679 && !targetm.have_ptr_extend ())
1681 temp
1682 = convert_memory_address_addr_space_1 (Pmode, op,
1683 ADDR_SPACE_GENERIC, false,
1684 true);
1685 if (temp)
1686 return temp;
1688 #endif
1689 break;
1691 default:
1692 break;
1695 if (VECTOR_MODE_P (mode)
1696 && vec_duplicate_p (op, &elt)
1697 && code != VEC_DUPLICATE)
1699 /* Try applying the operator to ELT and see if that simplifies.
1700 We can duplicate the result if so.
1702 The reason we don't use simplify_gen_unary is that it isn't
1703 necessarily a win to convert things like:
1705 (neg:V (vec_duplicate:V (reg:S R)))
to:
1709 (vec_duplicate:V (neg:S (reg:S R)))
1711 The first might be done entirely in vector registers while the
1712 second might need a move between register files. */
1713 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1714 elt, GET_MODE_INNER (GET_MODE (op)));
1715 if (temp)
1716 return gen_vec_duplicate (mode, temp);
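/* For example, (neg:V4SI (vec_duplicate:V4SI (const_int 5))) simplifies
   here to a duplicate of (const_int -5), i.e. a vector constant with
   every element equal to -5.  */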
1719 return 0;
1722 /* Try to compute the value of a unary operation CODE whose output mode is to
1723 be MODE with input operand OP whose mode was originally OP_MODE.
1724 Return zero if the value cannot be computed. */
1726 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1727 rtx op, machine_mode op_mode)
1729 scalar_int_mode result_mode;
1731 if (code == VEC_DUPLICATE)
1733 gcc_assert (VECTOR_MODE_P (mode));
1734 if (GET_MODE (op) != VOIDmode)
1736 if (!VECTOR_MODE_P (GET_MODE (op)))
1737 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1738 else
1739 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1740 (GET_MODE (op)));
1742 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1743 return gen_const_vec_duplicate (mode, op);
1744 unsigned int n_elts;
1745 if (GET_CODE (op) == CONST_VECTOR
1746 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
1748 /* This must be constant if we're duplicating it to a constant
1749 number of elements. */
1750 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op).to_constant ();
1751 gcc_assert (in_n_elts < n_elts);
1752 gcc_assert ((n_elts % in_n_elts) == 0);
1753 rtvec v = rtvec_alloc (n_elts);
1754 for (unsigned i = 0; i < n_elts; i++)
1755 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1756 return gen_rtx_CONST_VECTOR (mode, v);
1760 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1762 unsigned int n_elts;
1763 if (!CONST_VECTOR_NUNITS (op).is_constant (&n_elts))
1764 return NULL_RTX;
1766 machine_mode opmode = GET_MODE (op);
1767 gcc_assert (known_eq (GET_MODE_NUNITS (mode), n_elts));
1768 gcc_assert (known_eq (GET_MODE_NUNITS (opmode), n_elts));
1770 rtvec v = rtvec_alloc (n_elts);
1771 unsigned int i;
1773 for (i = 0; i < n_elts; i++)
1775 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1776 CONST_VECTOR_ELT (op, i),
1777 GET_MODE_INNER (opmode));
1778 if (!x || !valid_for_const_vector_p (mode, x))
1779 return 0;
1780 RTVEC_ELT (v, i) = x;
1782 return gen_rtx_CONST_VECTOR (mode, v);
1785 /* The order of these tests is critical so that, for example, we don't
1786 check the wrong mode (input vs. output) for a conversion operation,
1787 such as FIX. At some point, this should be simplified. */
1789 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1791 REAL_VALUE_TYPE d;
1793 if (op_mode == VOIDmode)
1795 /* CONST_INTs have VOIDmode as their mode. We assume that all
1796 the bits of the constant are significant, though this is a
1797 dangerous assumption: CONST_INTs are often created and used
1798 with garbage in the bits outside of the precision of the
1799 implied mode of the const_int. */
1800 op_mode = MAX_MODE_INT;
1803 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1805 /* Avoid the folding if flag_signaling_nans is on and
1806 operand is a signaling NaN. */
1807 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1808 return 0;
1810 d = real_value_truncate (mode, d);
1811 return const_double_from_real_value (d, mode);
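/* For example, (float:SF (const_int 3)) folds to the SFmode constant 3.0,
   and (float:DF (const_int -2)) to the DFmode constant -2.0.  */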
1813 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1815 REAL_VALUE_TYPE d;
1817 if (op_mode == VOIDmode)
1819 /* CONST_INTs have VOIDmode as their mode. We assume that all
1820 the bits of the constant are significant, though this is a
1821 dangerous assumption: CONST_INTs are often created and used
1822 with garbage in the bits outside of the precision of the
1823 implied mode of the const_int. */
1824 op_mode = MAX_MODE_INT;
1827 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1829 /* Avoid the folding if flag_signaling_nans is on and
1830 operand is a signaling NaN. */
1831 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1832 return 0;
1834 d = real_value_truncate (mode, d);
1835 return const_double_from_real_value (d, mode);
1838 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1840 unsigned int width = GET_MODE_PRECISION (result_mode);
1841 wide_int result;
1842 scalar_int_mode imode = (op_mode == VOIDmode
1843 ? result_mode
1844 : as_a <scalar_int_mode> (op_mode));
1845 rtx_mode_t op0 = rtx_mode_t (op, imode);
1846 int int_value;
1848 #if TARGET_SUPPORTS_WIDE_INT == 0
1849 /* This assert keeps the simplification from producing a result
1850 that cannot be represented in a CONST_DOUBLE but a lot of
1851 upstream callers expect that this function never fails to
1852 simplify something, and so if you added this to the test
1853 above the code would die later anyway. If this assert
1854 happens, you just need to make the port support wide int. */
1855 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1856 #endif
1858 switch (code)
1860 case NOT:
1861 result = wi::bit_not (op0);
1862 break;
1864 case NEG:
1865 result = wi::neg (op0);
1866 break;
1868 case ABS:
1869 result = wi::abs (op0);
1870 break;
1872 case FFS:
1873 result = wi::shwi (wi::ffs (op0), result_mode);
1874 break;
1876 case CLZ:
1877 if (wi::ne_p (op0, 0))
1878 int_value = wi::clz (op0);
1879 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1880 return NULL_RTX;
1881 result = wi::shwi (int_value, result_mode);
1882 break;
1884 case CLRSB:
1885 result = wi::shwi (wi::clrsb (op0), result_mode);
1886 break;
1888 case CTZ:
1889 if (wi::ne_p (op0, 0))
1890 int_value = wi::ctz (op0);
1891 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1892 return NULL_RTX;
1893 result = wi::shwi (int_value, result_mode);
1894 break;
1896 case POPCOUNT:
1897 result = wi::shwi (wi::popcount (op0), result_mode);
1898 break;
1900 case PARITY:
1901 result = wi::shwi (wi::parity (op0), result_mode);
1902 break;
1904 case BSWAP:
1905 result = wide_int (op0).bswap ();
1906 break;
1908 case TRUNCATE:
1909 case ZERO_EXTEND:
1910 result = wide_int::from (op0, width, UNSIGNED);
1911 break;
1913 case SIGN_EXTEND:
1914 result = wide_int::from (op0, width, SIGNED);
1915 break;
1917 case SQRT:
1918 default:
1919 return 0;
1922 return immed_wide_int_const (result, result_mode);
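/* A few concrete instances of the folds above: (popcount:SI (const_int 0xff))
   becomes (const_int 8), (bswap:SI (const_int 0x12345678)) becomes
   (const_int 0x78563412), and (zero_extend:DI (const_int -1)) with a SImode
   input becomes (const_int 0xffffffff).  */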
1925 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1926 && SCALAR_FLOAT_MODE_P (mode)
1927 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1929 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1930 switch (code)
1932 case SQRT:
1933 return 0;
1934 case ABS:
1935 d = real_value_abs (&d);
1936 break;
1937 case NEG:
1938 d = real_value_negate (&d);
1939 break;
1940 case FLOAT_TRUNCATE:
1941 /* Don't perform the operation if flag_signaling_nans is on
1942 and the operand is a signaling NaN. */
1943 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1944 return NULL_RTX;
1945 d = real_value_truncate (mode, d);
1946 break;
1947 case FLOAT_EXTEND:
1948 /* Don't perform the operation if flag_signaling_nans is on
1949 and the operand is a signaling NaN. */
1950 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1951 return NULL_RTX;
1952 /* All this does is change the mode, unless changing
1953 mode class. */
1954 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1955 real_convert (&d, mode, &d);
1956 break;
1957 case FIX:
1958 /* Don't perform the operation if flag_signaling_nans is on
1959 and the operand is a signaling NaN. */
1960 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1961 return NULL_RTX;
1962 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1963 break;
1964 case NOT:
1966 long tmp[4];
1967 int i;
1969 real_to_target (tmp, &d, GET_MODE (op));
1970 for (i = 0; i < 4; i++)
1971 tmp[i] = ~tmp[i];
1972 real_from_target (&d, tmp, mode);
1973 break;
1975 default:
1976 gcc_unreachable ();
1978 return const_double_from_real_value (d, mode);
1980 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1981 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1982 && is_int_mode (mode, &result_mode))
1984 unsigned int width = GET_MODE_PRECISION (result_mode);
1985 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1986 operators are intentionally left unspecified (to ease implementation
1987 by target backends), for consistency, this routine implements the
1988 same semantics for constant folding as used by the middle-end. */
1990 /* This was formerly used only for non-IEEE float.
1991 eggert@twinsun.com says it is safe for IEEE also. */
1992 REAL_VALUE_TYPE t;
1993 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1994 wide_int wmax, wmin;
1995 /* This is part of the ABI of real_to_integer, but we check
1996 things before making this call. */
1997 bool fail;
1999 switch (code)
2001 case FIX:
2002 if (REAL_VALUE_ISNAN (*x))
2003 return const0_rtx;
2005 /* Test against the signed upper bound. */
2006 wmax = wi::max_value (width, SIGNED);
2007 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2008 if (real_less (&t, x))
2009 return immed_wide_int_const (wmax, mode);
2011 /* Test against the signed lower bound. */
2012 wmin = wi::min_value (width, SIGNED);
2013 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2014 if (real_less (x, &t))
2015 return immed_wide_int_const (wmin, mode);
2017 return immed_wide_int_const (real_to_integer (x, &fail, width),
2018 mode);
2020 case UNSIGNED_FIX:
2021 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2022 return const0_rtx;
2024 /* Test against the unsigned upper bound. */
2025 wmax = wi::max_value (width, UNSIGNED);
2026 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2027 if (real_less (&t, x))
2028 return immed_wide_int_const (wmax, mode);
2030 return immed_wide_int_const (real_to_integer (x, &fail, width),
2031 mode);
2033 default:
2034 gcc_unreachable ();
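/* For example, (fix:QI (const_double:SF 300.0)) saturates to the QImode
   maximum (const_int 127), (unsigned_fix:QI (const_double:SF -3.5)) folds
   to (const_int 0), and in-range values simply truncate towards zero.  */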
2038 /* Handle polynomial integers. */
2039 else if (CONST_POLY_INT_P (op))
2041 poly_wide_int result;
2042 switch (code)
2044 case NEG:
2045 result = -const_poly_int_value (op);
2046 break;
2048 case NOT:
2049 result = ~const_poly_int_value (op);
2050 break;
2052 default:
2053 return NULL_RTX;
2055 return immed_wide_int_const (result, mode);
2058 return NULL_RTX;
2061 /* Subroutine of simplify_binary_operation to simplify a binary operation
2062 CODE that can commute with byte swapping, with result mode MODE and
2063 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2064 Return zero if no simplification or canonicalization is possible. */
2066 static rtx
2067 simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2068 rtx op0, rtx op1)
2070 rtx tem;
2072 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2073 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2075 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2076 simplify_gen_unary (BSWAP, mode, op1, mode));
2077 return simplify_gen_unary (BSWAP, mode, tem, mode);
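/* For example, in SImode (and (bswap x) (const_int 0xff)) becomes
   (bswap (and x (const_int 0xff000000))): byte-swapping the mask instead
   of the value leaves a single BSWAP on the outside.  */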
2080 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2081 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2083 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2084 return simplify_gen_unary (BSWAP, mode, tem, mode);
2087 return NULL_RTX;
2090 /* Subroutine of simplify_binary_operation to simplify a commutative,
2091 associative binary operation CODE with result mode MODE, operating
2092 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2093 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2094 canonicalization is possible. */
2096 static rtx
2097 simplify_associative_operation (enum rtx_code code, machine_mode mode,
2098 rtx op0, rtx op1)
2100 rtx tem;
2102 /* Linearize the operator to the left. */
2103 if (GET_CODE (op1) == code)
2105 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2106 if (GET_CODE (op0) == code)
2108 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2109 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2112 /* "a op (b op c)" becomes "(b op c) op a". */
2113 if (! swap_commutative_operands_p (op1, op0))
2114 return simplify_gen_binary (code, mode, op1, op0);
2116 std::swap (op0, op1);
2119 if (GET_CODE (op0) == code)
2121 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2122 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2124 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2125 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2128 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2129 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2130 if (tem != 0)
2131 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2133 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2134 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2135 if (tem != 0)
2136 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
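/* For example, (plus (plus x (const_int 3)) y) is canonicalized to
   (plus (plus x y) (const_int 3)), and (plus (plus x (const_int 3))
   (const_int 4)) folds to (plus x (const_int 7)).  */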
2139 return 0;
2143 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2144 and OP1. Return 0 if no simplification is possible.
2146 Don't use this for relational operations such as EQ or LT.
2147 Use simplify_relational_operation instead. */
2149 simplify_binary_operation (enum rtx_code code, machine_mode mode,
2150 rtx op0, rtx op1)
2152 rtx trueop0, trueop1;
2153 rtx tem;
2155 /* Relational operations don't work here. We must know the mode
2156 of the operands in order to do the comparison correctly.
2157 Assuming a full word can give incorrect results.
2158 Consider comparing 128 with -128 in QImode. */
2159 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2160 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2162 /* Make sure the constant is second. */
2163 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2164 && swap_commutative_operands_p (op0, op1))
2165 std::swap (op0, op1);
2167 trueop0 = avoid_constant_pool_reference (op0);
2168 trueop1 = avoid_constant_pool_reference (op1);
2170 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2171 if (tem)
2172 return tem;
2173 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2175 if (tem)
2176 return tem;
2178 /* If the above steps did not result in a simplification and op0 or op1
2179 were constant pool references, use the referenced constants directly. */
2180 if (trueop0 != op0 || trueop1 != op1)
2181 return simplify_gen_binary (code, mode, trueop0, trueop1);
2183 return NULL_RTX;
2186 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2187 which OP0 and OP1 are both vector series or vector duplicates
2188 (which are really just series with a step of 0). If so, try to
2189 form a new series by applying CODE to the bases and to the steps.
2190 Return null if no simplification is possible.
2192 MODE is the mode of the operation and is known to be a vector
2193 integer mode. */
2195 static rtx
2196 simplify_binary_operation_series (rtx_code code, machine_mode mode,
2197 rtx op0, rtx op1)
2199 rtx base0, step0;
2200 if (vec_duplicate_p (op0, &base0))
2201 step0 = const0_rtx;
2202 else if (!vec_series_p (op0, &base0, &step0))
2203 return NULL_RTX;
2205 rtx base1, step1;
2206 if (vec_duplicate_p (op1, &base1))
2207 step1 = const0_rtx;
2208 else if (!vec_series_p (op1, &base1, &step1))
2209 return NULL_RTX;
2211 /* Only create a new series if we can simplify both parts. In other
2212 cases this isn't really a simplification, and it's not necessarily
2213 a win to replace a vector operation with a scalar operation. */
2214 scalar_mode inner_mode = GET_MODE_INNER (mode);
2215 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2216 if (!new_base)
2217 return NULL_RTX;
2219 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2220 if (!new_step)
2221 return NULL_RTX;
2223 return gen_vec_series (mode, new_base, new_step);
2226 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2227 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2228 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2229 actual constants. */
2231 static rtx
2232 simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2233 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2235 rtx tem, reversed, opleft, opright, elt0, elt1;
2236 HOST_WIDE_INT val;
2237 scalar_int_mode int_mode, inner_mode;
2238 poly_int64 offset;
2240 /* Even if we can't compute a constant result,
2241 there are some cases worth simplifying. */
2243 switch (code)
2245 case PLUS:
2246 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2247 when x is NaN, infinite, or finite and nonzero. They aren't
2248 when x is -0 and the rounding mode is not towards -infinity,
2249 since (-0) + 0 is then 0. */
2250 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2251 return op0;
2253 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2254 transformations are safe even for IEEE. */
2255 if (GET_CODE (op0) == NEG)
2256 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2257 else if (GET_CODE (op1) == NEG)
2258 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2260 /* (~a) + 1 -> -a */
2261 if (INTEGRAL_MODE_P (mode)
2262 && GET_CODE (op0) == NOT
2263 && trueop1 == const1_rtx)
2264 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2266 /* Handle both-operands-constant cases. We can only add
2267 CONST_INTs to constants since the sum of relocatable symbols
2268 can't be handled by most assemblers. Don't add CONST_INT
2269 to CONST_INT since overflow won't be computed properly if wider
2270 than HOST_BITS_PER_WIDE_INT. */
2272 if ((GET_CODE (op0) == CONST
2273 || GET_CODE (op0) == SYMBOL_REF
2274 || GET_CODE (op0) == LABEL_REF)
2275 && CONST_INT_P (op1))
2276 return plus_constant (mode, op0, INTVAL (op1));
2277 else if ((GET_CODE (op1) == CONST
2278 || GET_CODE (op1) == SYMBOL_REF
2279 || GET_CODE (op1) == LABEL_REF)
2280 && CONST_INT_P (op0))
2281 return plus_constant (mode, op1, INTVAL (op0));
2283 /* See if this is something like X * C - X or vice versa or
2284 if the multiplication is written as a shift. If so, we can
2285 distribute and make a new multiply, shift, or maybe just
2286 have X (if C is 2 in the example above). But don't make
2287 something more expensive than we had before. */
2289 if (is_a <scalar_int_mode> (mode, &int_mode))
2291 rtx lhs = op0, rhs = op1;
2293 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2294 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2296 if (GET_CODE (lhs) == NEG)
2298 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2299 lhs = XEXP (lhs, 0);
2301 else if (GET_CODE (lhs) == MULT
2302 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2304 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2305 lhs = XEXP (lhs, 0);
2307 else if (GET_CODE (lhs) == ASHIFT
2308 && CONST_INT_P (XEXP (lhs, 1))
2309 && INTVAL (XEXP (lhs, 1)) >= 0
2310 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2312 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2313 GET_MODE_PRECISION (int_mode));
2314 lhs = XEXP (lhs, 0);
2317 if (GET_CODE (rhs) == NEG)
2319 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2320 rhs = XEXP (rhs, 0);
2322 else if (GET_CODE (rhs) == MULT
2323 && CONST_INT_P (XEXP (rhs, 1)))
2325 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2326 rhs = XEXP (rhs, 0);
2328 else if (GET_CODE (rhs) == ASHIFT
2329 && CONST_INT_P (XEXP (rhs, 1))
2330 && INTVAL (XEXP (rhs, 1)) >= 0
2331 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2333 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2334 GET_MODE_PRECISION (int_mode));
2335 rhs = XEXP (rhs, 0);
2338 if (rtx_equal_p (lhs, rhs))
2340 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2341 rtx coeff;
2342 bool speed = optimize_function_for_speed_p (cfun);
2344 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2346 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2347 return (set_src_cost (tem, int_mode, speed)
2348 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
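/* For example, (plus (mult x (const_int 3)) x) combines to
   (mult x (const_int 4)), and (plus (ashift x (const_int 2)) x) to
   (mult x (const_int 5)), provided the new form is no more costly.  */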
2352 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2353 if (CONST_SCALAR_INT_P (op1)
2354 && GET_CODE (op0) == XOR
2355 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2356 && mode_signbit_p (mode, op1))
2357 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2358 simplify_gen_binary (XOR, mode, op1,
2359 XEXP (op0, 1)));
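/* Adding the sign bit is the same as XORing it, so e.g. in QImode
   (plus (xor x (const_int 64)) (const_int -128)) becomes
   (xor x (const_int -64)).  */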
2361 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2362 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2363 && GET_CODE (op0) == MULT
2364 && GET_CODE (XEXP (op0, 0)) == NEG)
2366 rtx in1, in2;
2368 in1 = XEXP (XEXP (op0, 0), 0);
2369 in2 = XEXP (op0, 1);
2370 return simplify_gen_binary (MINUS, mode, op1,
2371 simplify_gen_binary (MULT, mode,
2372 in1, in2));
2375 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2376 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2377 is 1. */
2378 if (COMPARISON_P (op0)
2379 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2380 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2381 && (reversed = reversed_comparison (op0, mode)))
2382 return
2383 simplify_gen_unary (NEG, mode, reversed, mode);
2385 /* If one of the operands is a PLUS or a MINUS, see if we can
2386 simplify this by the associative law.
2387 Don't use the associative law for floating point.
2388 The inaccuracy makes it nonassociative,
2389 and subtle programs can break if operations are associated. */
2391 if (INTEGRAL_MODE_P (mode)
2392 && (plus_minus_operand_p (op0)
2393 || plus_minus_operand_p (op1))
2394 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2395 return tem;
2397 /* Reassociate floating point addition only when the user
2398 specifies associative math operations. */
2399 if (FLOAT_MODE_P (mode)
2400 && flag_associative_math)
2402 tem = simplify_associative_operation (code, mode, op0, op1);
2403 if (tem)
2404 return tem;
2407 /* Handle vector series. */
2408 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2410 tem = simplify_binary_operation_series (code, mode, op0, op1);
2411 if (tem)
2412 return tem;
2414 break;
2416 case COMPARE:
2417 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2418 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2419 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2420 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2422 rtx xop00 = XEXP (op0, 0);
2423 rtx xop10 = XEXP (op1, 0);
2425 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2426 return xop00;
2428 if (REG_P (xop00) && REG_P (xop10)
2429 && REGNO (xop00) == REGNO (xop10)
2430 && GET_MODE (xop00) == mode
2431 && GET_MODE (xop10) == mode
2432 && GET_MODE_CLASS (mode) == MODE_CC)
2433 return xop00;
2435 break;
2437 case MINUS:
2438 /* We can't assume x-x is 0 even with non-IEEE floating point,
2439 but since it is zero except in very strange circumstances, we
2440 will treat it as zero with -ffinite-math-only. */
2441 if (rtx_equal_p (trueop0, trueop1)
2442 && ! side_effects_p (op0)
2443 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2444 return CONST0_RTX (mode);
2446 /* Change subtraction from zero into negation. (0 - x) is the
2447 same as -x when x is NaN, infinite, or finite and nonzero.
2448 But if the mode has signed zeros, and does not round towards
2449 -infinity, then 0 - 0 is 0, not -0. */
2450 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2451 return simplify_gen_unary (NEG, mode, op1, mode);
2453 /* (-1 - a) is ~a, unless the expression contains symbolic
2454 constants, in which case not retaining additions and
2455 subtractions could cause invalid assembly to be produced. */
2456 if (trueop0 == constm1_rtx
2457 && !contains_symbolic_reference_p (op1))
2458 return simplify_gen_unary (NOT, mode, op1, mode);
2460 /* Subtracting 0 has no effect unless the mode has signed zeros
2461 and supports rounding towards -infinity. In such a case,
2462 0 - 0 is -0. */
2463 if (!(HONOR_SIGNED_ZEROS (mode)
2464 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2465 && trueop1 == CONST0_RTX (mode))
2466 return op0;
2468 /* See if this is something like X * C - X or vice versa or
2469 if the multiplication is written as a shift. If so, we can
2470 distribute and make a new multiply, shift, or maybe just
2471 have X (if C is 2 in the example above). But don't make
2472 something more expensive than we had before. */
2474 if (is_a <scalar_int_mode> (mode, &int_mode))
2476 rtx lhs = op0, rhs = op1;
2478 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2479 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2481 if (GET_CODE (lhs) == NEG)
2483 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2484 lhs = XEXP (lhs, 0);
2486 else if (GET_CODE (lhs) == MULT
2487 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2489 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2490 lhs = XEXP (lhs, 0);
2492 else if (GET_CODE (lhs) == ASHIFT
2493 && CONST_INT_P (XEXP (lhs, 1))
2494 && INTVAL (XEXP (lhs, 1)) >= 0
2495 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2497 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2498 GET_MODE_PRECISION (int_mode));
2499 lhs = XEXP (lhs, 0);
2502 if (GET_CODE (rhs) == NEG)
2504 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2505 rhs = XEXP (rhs, 0);
2507 else if (GET_CODE (rhs) == MULT
2508 && CONST_INT_P (XEXP (rhs, 1)))
2510 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2511 rhs = XEXP (rhs, 0);
2513 else if (GET_CODE (rhs) == ASHIFT
2514 && CONST_INT_P (XEXP (rhs, 1))
2515 && INTVAL (XEXP (rhs, 1)) >= 0
2516 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2518 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2519 GET_MODE_PRECISION (int_mode));
2520 negcoeff1 = -negcoeff1;
2521 rhs = XEXP (rhs, 0);
2524 if (rtx_equal_p (lhs, rhs))
2526 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2527 rtx coeff;
2528 bool speed = optimize_function_for_speed_p (cfun);
2530 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2532 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2533 return (set_src_cost (tem, int_mode, speed)
2534 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2538 /* (a - (-b)) -> (a + b). True even for IEEE. */
2539 if (GET_CODE (op1) == NEG)
2540 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2542 /* (-x - c) may be simplified as (-c - x). */
2543 if (GET_CODE (op0) == NEG
2544 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2546 tem = simplify_unary_operation (NEG, mode, op1, mode);
2547 if (tem)
2548 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2551 if ((GET_CODE (op0) == CONST
2552 || GET_CODE (op0) == SYMBOL_REF
2553 || GET_CODE (op0) == LABEL_REF)
2554 && poly_int_rtx_p (op1, &offset))
2555 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
2557 /* Don't let a relocatable value get a negative coeff. */
2558 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2559 return simplify_gen_binary (PLUS, mode,
2560 op0,
2561 neg_const_int (mode, op1));
2563 /* (x - (x & y)) -> (x & ~y) */
2564 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2566 if (rtx_equal_p (op0, XEXP (op1, 0)))
2568 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2569 GET_MODE (XEXP (op1, 1)));
2570 return simplify_gen_binary (AND, mode, op0, tem);
2572 if (rtx_equal_p (op0, XEXP (op1, 1)))
2574 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2575 GET_MODE (XEXP (op1, 0)));
2576 return simplify_gen_binary (AND, mode, op0, tem);
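/* For example, with x = 0b1101 and y = 0b0110: x & y = 0b0100, and
   x - (x & y) = 0b1001 = x & ~y, since the subtraction clears exactly
   the bits that are set in both x and y.  */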
2580 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2581 by reversing the comparison code if valid. */
2582 if (STORE_FLAG_VALUE == 1
2583 && trueop0 == const1_rtx
2584 && COMPARISON_P (op1)
2585 && (reversed = reversed_comparison (op1, mode)))
2586 return reversed;
2588 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2589 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2590 && GET_CODE (op1) == MULT
2591 && GET_CODE (XEXP (op1, 0)) == NEG)
2593 rtx in1, in2;
2595 in1 = XEXP (XEXP (op1, 0), 0);
2596 in2 = XEXP (op1, 1);
2597 return simplify_gen_binary (PLUS, mode,
2598 simplify_gen_binary (MULT, mode,
2599 in1, in2),
2600 op0);
2603 /* Canonicalize (minus (neg A) (mult B C)) to
2604 (minus (mult (neg B) C) A). */
2605 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2606 && GET_CODE (op1) == MULT
2607 && GET_CODE (op0) == NEG)
2609 rtx in1, in2;
2611 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2612 in2 = XEXP (op1, 1);
2613 return simplify_gen_binary (MINUS, mode,
2614 simplify_gen_binary (MULT, mode,
2615 in1, in2),
2616 XEXP (op0, 0));
2619 /* If one of the operands is a PLUS or a MINUS, see if we can
2620 simplify this by the associative law. This will, for example,
2621 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2622 Don't use the associative law for floating point.
2623 The inaccuracy makes it nonassociative,
2624 and subtle programs can break if operations are associated. */
2626 if (INTEGRAL_MODE_P (mode)
2627 && (plus_minus_operand_p (op0)
2628 || plus_minus_operand_p (op1))
2629 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2630 return tem;
2632 /* Handle vector series. */
2633 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2635 tem = simplify_binary_operation_series (code, mode, op0, op1);
2636 if (tem)
2637 return tem;
2639 break;
2641 case MULT:
2642 if (trueop1 == constm1_rtx)
2643 return simplify_gen_unary (NEG, mode, op0, mode);
2645 if (GET_CODE (op0) == NEG)
2647 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2648 /* If op1 is a MULT as well and simplify_unary_operation
2649 just moved the NEG to the second operand, simplify_gen_binary
2650 below could, through simplify_associative_operation, move
2651 the NEG around again and recurse endlessly. */
2652 if (temp
2653 && GET_CODE (op1) == MULT
2654 && GET_CODE (temp) == MULT
2655 && XEXP (op1, 0) == XEXP (temp, 0)
2656 && GET_CODE (XEXP (temp, 1)) == NEG
2657 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2658 temp = NULL_RTX;
2659 if (temp)
2660 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2662 if (GET_CODE (op1) == NEG)
2664 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2665 /* If op0 is a MULT as well and simplify_unary_operation
2666 just moved the NEG to the second operand, simplify_gen_binary
2667 below could, through simplify_associative_operation, move
2668 the NEG around again and recurse endlessly. */
2669 if (temp
2670 && GET_CODE (op0) == MULT
2671 && GET_CODE (temp) == MULT
2672 && XEXP (op0, 0) == XEXP (temp, 0)
2673 && GET_CODE (XEXP (temp, 1)) == NEG
2674 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2675 temp = NULL_RTX;
2676 if (temp)
2677 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2680 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2681 x is NaN, since x * 0 is then also NaN. Nor is it valid
2682 when the mode has signed zeros, since multiplying a negative
2683 number by 0 will give -0, not 0. */
2684 if (!HONOR_NANS (mode)
2685 && !HONOR_SIGNED_ZEROS (mode)
2686 && trueop1 == CONST0_RTX (mode)
2687 && ! side_effects_p (op0))
2688 return op1;
2690 /* In IEEE floating point, x*1 is not equivalent to x for
2691 signalling NaNs. */
2692 if (!HONOR_SNANS (mode)
2693 && trueop1 == CONST1_RTX (mode))
2694 return op0;
2696 /* Convert multiply by constant power of two into shift. */
2697 if (CONST_SCALAR_INT_P (trueop1))
2699 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2700 if (val >= 0)
2701 return simplify_gen_binary (ASHIFT, mode, op0,
2702 gen_int_shift_amount (mode, val));
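/* For example, (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */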
2705 /* x*2 is x+x and x*(-1) is -x */
2706 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2707 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2708 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2709 && GET_MODE (op0) == mode)
2711 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2713 if (real_equal (d1, &dconst2))
2714 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2716 if (!HONOR_SNANS (mode)
2717 && real_equal (d1, &dconstm1))
2718 return simplify_gen_unary (NEG, mode, op0, mode);
2721 /* Optimize -x * -x as x * x. */
2722 if (FLOAT_MODE_P (mode)
2723 && GET_CODE (op0) == NEG
2724 && GET_CODE (op1) == NEG
2725 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2726 && !side_effects_p (XEXP (op0, 0)))
2727 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2729 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2730 if (SCALAR_FLOAT_MODE_P (mode)
2731 && GET_CODE (op0) == ABS
2732 && GET_CODE (op1) == ABS
2733 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2734 && !side_effects_p (XEXP (op0, 0)))
2735 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2737 /* Reassociate multiplication, but for floating point MULTs
2738 only when the user specifies unsafe math optimizations. */
2739 if (! FLOAT_MODE_P (mode)
2740 || flag_unsafe_math_optimizations)
2742 tem = simplify_associative_operation (code, mode, op0, op1);
2743 if (tem)
2744 return tem;
2746 break;
2748 case IOR:
2749 if (trueop1 == CONST0_RTX (mode))
2750 return op0;
2751 if (INTEGRAL_MODE_P (mode)
2752 && trueop1 == CONSTM1_RTX (mode)
2753 && !side_effects_p (op0))
2754 return op1;
2755 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2756 return op0;
2757 /* A | (~A) -> -1 */
2758 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2759 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2760 && ! side_effects_p (op0)
2761 && SCALAR_INT_MODE_P (mode))
2762 return constm1_rtx;
2764 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2765 if (CONST_INT_P (op1)
2766 && HWI_COMPUTABLE_MODE_P (mode)
2767 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2768 && !side_effects_p (op0))
2769 return op1;
2771 /* Canonicalize (X & C1) | C2. */
2772 if (GET_CODE (op0) == AND
2773 && CONST_INT_P (trueop1)
2774 && CONST_INT_P (XEXP (op0, 1)))
2776 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2777 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2778 HOST_WIDE_INT c2 = INTVAL (trueop1);
2780 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2781 if ((c1 & c2) == c1
2782 && !side_effects_p (XEXP (op0, 0)))
2783 return trueop1;
2785 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2786 if (((c1|c2) & mask) == mask)
2787 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2790 /* Convert (A & B) | A to A. */
2791 if (GET_CODE (op0) == AND
2792 && (rtx_equal_p (XEXP (op0, 0), op1)
2793 || rtx_equal_p (XEXP (op0, 1), op1))
2794 && ! side_effects_p (XEXP (op0, 0))
2795 && ! side_effects_p (XEXP (op0, 1)))
2796 return op1;
2798 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2799 mode size to (rotate A CX). */
2801 if (GET_CODE (op1) == ASHIFT
2802 || GET_CODE (op1) == SUBREG)
2804 opleft = op1;
2805 opright = op0;
2807 else
2809 opright = op1;
2810 opleft = op0;
2813 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2814 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2815 && CONST_INT_P (XEXP (opleft, 1))
2816 && CONST_INT_P (XEXP (opright, 1))
2817 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2818 == GET_MODE_UNIT_PRECISION (mode)))
2819 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
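/* For example, in SImode (ior (ashift x (const_int 24))
   (lshiftrt x (const_int 8))) becomes (rotate x (const_int 24)),
   since the two shift counts add up to the 32-bit precision.  */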
2821 /* Same, but for ashift that has been "simplified" to a wider mode
2822 by simplify_shift_const. */
2824 if (GET_CODE (opleft) == SUBREG
2825 && is_a <scalar_int_mode> (mode, &int_mode)
2826 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2827 &inner_mode)
2828 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2829 && GET_CODE (opright) == LSHIFTRT
2830 && GET_CODE (XEXP (opright, 0)) == SUBREG
2831 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2832 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2833 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2834 SUBREG_REG (XEXP (opright, 0)))
2835 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2836 && CONST_INT_P (XEXP (opright, 1))
2837 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2838 + INTVAL (XEXP (opright, 1))
2839 == GET_MODE_PRECISION (int_mode)))
2840 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2841 XEXP (SUBREG_REG (opleft), 1));
2843 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2844 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2845 the PLUS does not affect any of the bits in OP1: then we can do
2846 the IOR as a PLUS and we can associate. This is valid if OP1
2847 can be safely shifted left C bits. */
2848 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2849 && GET_CODE (XEXP (op0, 0)) == PLUS
2850 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2851 && CONST_INT_P (XEXP (op0, 1))
2852 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2854 int count = INTVAL (XEXP (op0, 1));
2855 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2857 if (mask >> count == INTVAL (trueop1)
2858 && trunc_int_for_mode (mask, mode) == mask
2859 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2860 return simplify_gen_binary (ASHIFTRT, mode,
2861 plus_constant (mode, XEXP (op0, 0),
2862 mask),
2863 XEXP (op0, 1));
2866 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2867 if (tem)
2868 return tem;
2870 tem = simplify_associative_operation (code, mode, op0, op1);
2871 if (tem)
2872 return tem;
2873 break;
2875 case XOR:
2876 if (trueop1 == CONST0_RTX (mode))
2877 return op0;
2878 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2879 return simplify_gen_unary (NOT, mode, op0, mode);
2880 if (rtx_equal_p (trueop0, trueop1)
2881 && ! side_effects_p (op0)
2882 && GET_MODE_CLASS (mode) != MODE_CC)
2883 return CONST0_RTX (mode);
2885 /* Canonicalize XOR of the most significant bit to PLUS. */
2886 if (CONST_SCALAR_INT_P (op1)
2887 && mode_signbit_p (mode, op1))
2888 return simplify_gen_binary (PLUS, mode, op0, op1);
2889 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2890 if (CONST_SCALAR_INT_P (op1)
2891 && GET_CODE (op0) == PLUS
2892 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2893 && mode_signbit_p (mode, XEXP (op0, 1)))
2894 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2895 simplify_gen_binary (XOR, mode, op1,
2896 XEXP (op0, 1)));
2898 /* If we are XORing two things that have no bits in common,
2899 convert them into an IOR. This helps to detect rotation encoded
2900 using those methods and possibly other simplifications. */
2902 if (HWI_COMPUTABLE_MODE_P (mode)
2903 && (nonzero_bits (op0, mode)
2904 & nonzero_bits (op1, mode)) == 0)
2905 return (simplify_gen_binary (IOR, mode, op0, op1));
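/* For example, (xor (and x (const_int 15)) (and y (const_int 240)))
   becomes the corresponding IOR, since the two operands can have no
   nonzero bits in common.  */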
2907 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2908 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2909 (NOT y). */
2911 int num_negated = 0;
2913 if (GET_CODE (op0) == NOT)
2914 num_negated++, op0 = XEXP (op0, 0);
2915 if (GET_CODE (op1) == NOT)
2916 num_negated++, op1 = XEXP (op1, 0);
2918 if (num_negated == 2)
2919 return simplify_gen_binary (XOR, mode, op0, op1);
2920 else if (num_negated == 1)
2921 return simplify_gen_unary (NOT, mode,
2922 simplify_gen_binary (XOR, mode, op0, op1),
2923 mode);
2926 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2927 correspond to a machine insn or result in further simplifications
2928 if B is a constant. */
2930 if (GET_CODE (op0) == AND
2931 && rtx_equal_p (XEXP (op0, 1), op1)
2932 && ! side_effects_p (op1))
2933 return simplify_gen_binary (AND, mode,
2934 simplify_gen_unary (NOT, mode,
2935 XEXP (op0, 0), mode),
2936 op1);
2938 else if (GET_CODE (op0) == AND
2939 && rtx_equal_p (XEXP (op0, 0), op1)
2940 && ! side_effects_p (op1))
2941 return simplify_gen_binary (AND, mode,
2942 simplify_gen_unary (NOT, mode,
2943 XEXP (op0, 1), mode),
2944 op1);
2946 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2947 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
2948 out bits inverted twice and not set by C. Similarly, given
2949 (xor (and (xor A B) C) D), simplify without inverting C in
2950 the xor operand: (xor (and A C) (B&C)^D).  */
2952 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2953 && GET_CODE (XEXP (op0, 0)) == XOR
2954 && CONST_INT_P (op1)
2955 && CONST_INT_P (XEXP (op0, 1))
2956 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2958 enum rtx_code op = GET_CODE (op0);
2959 rtx a = XEXP (XEXP (op0, 0), 0);
2960 rtx b = XEXP (XEXP (op0, 0), 1);
2961 rtx c = XEXP (op0, 1);
2962 rtx d = op1;
2963 HOST_WIDE_INT bval = INTVAL (b);
2964 HOST_WIDE_INT cval = INTVAL (c);
2965 HOST_WIDE_INT dval = INTVAL (d);
2966 HOST_WIDE_INT xcval;
2968 if (op == IOR)
2969 xcval = ~cval;
2970 else
2971 xcval = cval;
2973 return simplify_gen_binary (XOR, mode,
2974 simplify_gen_binary (op, mode, a, c),
2975 gen_int_mode ((bval & xcval) ^ dval,
2976 mode));
2979 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2980 we can transform like this:
2981 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2982 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2983 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2984 Attempt a few simplifications when B and C are both constants. */
2985 if (GET_CODE (op0) == AND
2986 && CONST_INT_P (op1)
2987 && CONST_INT_P (XEXP (op0, 1)))
2989 rtx a = XEXP (op0, 0);
2990 rtx b = XEXP (op0, 1);
2991 rtx c = op1;
2992 HOST_WIDE_INT bval = INTVAL (b);
2993 HOST_WIDE_INT cval = INTVAL (c);
2995 /* Instead of computing ~A&C, we compute its negated value,
2996 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2997 optimize for sure. If it does not simplify, we still try
2998 to compute ~A&C below, but since that always allocates
2999 RTL, we don't try that before committing to returning a
3000 simplified expression. */
3001 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3002 GEN_INT (~cval));
3004 if ((~cval & bval) == 0)
3006 rtx na_c = NULL_RTX;
3007 if (n_na_c)
3008 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3009 else
3011 /* If ~A does not simplify, don't bother: we don't
3012 want to simplify 2 operations into 3, and if na_c
3013 were to simplify with na, n_na_c would have
3014 simplified as well. */
3015 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3016 if (na)
3017 na_c = simplify_gen_binary (AND, mode, na, c);
3020 /* Try to simplify ~A&C | ~B&C. */
3021 if (na_c != NULL_RTX)
3022 return simplify_gen_binary (IOR, mode, na_c,
3023 gen_int_mode (~bval & cval, mode));
3025 else
3027 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3028 if (n_na_c == CONSTM1_RTX (mode))
3030 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3031 gen_int_mode (~cval & bval,
3032 mode));
3033 return simplify_gen_binary (IOR, mode, a_nc_b,
3034 gen_int_mode (~bval & cval,
3035 mode));
3040 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3041 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3042 machines, and also has shorter instruction path length. */
3043 if (GET_CODE (op0) == AND
3044 && GET_CODE (XEXP (op0, 0)) == XOR
3045 && CONST_INT_P (XEXP (op0, 1))
3046 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3048 rtx a = trueop1;
3049 rtx b = XEXP (XEXP (op0, 0), 1);
3050 rtx c = XEXP (op0, 1);
3051 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3052 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3053 rtx bc = simplify_gen_binary (AND, mode, b, c);
3054 return simplify_gen_binary (IOR, mode, a_nc, bc);
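/* The identity behind this: where a bit of C is 1 the result takes B's
   bit, elsewhere it takes A's bit, e.g. with A = 0b1100, B = 0b1010,
   C = 0b0110 both forms evaluate to 0b1010.  */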
3056 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3057 else if (GET_CODE (op0) == AND
3058 && GET_CODE (XEXP (op0, 0)) == XOR
3059 && CONST_INT_P (XEXP (op0, 1))
3060 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3062 rtx a = XEXP (XEXP (op0, 0), 0);
3063 rtx b = trueop1;
3064 rtx c = XEXP (op0, 1);
3065 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3066 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3067 rtx ac = simplify_gen_binary (AND, mode, a, c);
3068 return simplify_gen_binary (IOR, mode, ac, b_nc);
3071 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3072 comparison if STORE_FLAG_VALUE is 1. */
3073 if (STORE_FLAG_VALUE == 1
3074 && trueop1 == const1_rtx
3075 && COMPARISON_P (op0)
3076 && (reversed = reversed_comparison (op0, mode)))
3077 return reversed;
3079 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3080 is (lt foo (const_int 0)), so we can perform the above
3081 simplification if STORE_FLAG_VALUE is 1. */
3083 if (is_a <scalar_int_mode> (mode, &int_mode)
3084 && STORE_FLAG_VALUE == 1
3085 && trueop1 == const1_rtx
3086 && GET_CODE (op0) == LSHIFTRT
3087 && CONST_INT_P (XEXP (op0, 1))
3088 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3089 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3091 /* (xor (comparison foo bar) (const_int sign-bit))
3092 when STORE_FLAG_VALUE is the sign bit. */
3093 if (is_a <scalar_int_mode> (mode, &int_mode)
3094 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3095 && trueop1 == const_true_rtx
3096 && COMPARISON_P (op0)
3097 && (reversed = reversed_comparison (op0, int_mode)))
3098 return reversed;
3100 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3101 if (tem)
3102 return tem;
3104 tem = simplify_associative_operation (code, mode, op0, op1);
3105 if (tem)
3106 return tem;
3107 break;
3109 case AND:
3110 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3111 return trueop1;
3112 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3113 return op0;
3114 if (HWI_COMPUTABLE_MODE_P (mode))
3116 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3117 HOST_WIDE_INT nzop1;
3118 if (CONST_INT_P (trueop1))
3120 HOST_WIDE_INT val1 = INTVAL (trueop1);
3121 /* If we are turning off bits already known off in OP0, we need
3122 not do an AND. */
3123 if ((nzop0 & ~val1) == 0)
3124 return op0;
3126 nzop1 = nonzero_bits (trueop1, mode);
3127 /* If we are clearing all the nonzero bits, the result is zero. */
3128 if ((nzop1 & nzop0) == 0
3129 && !side_effects_p (op0) && !side_effects_p (op1))
3130 return CONST0_RTX (mode);
3132 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3133 && GET_MODE_CLASS (mode) != MODE_CC)
3134 return op0;
3135 /* A & (~A) -> 0 */
3136 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3137 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3138 && ! side_effects_p (op0)
3139 && GET_MODE_CLASS (mode) != MODE_CC)
3140 return CONST0_RTX (mode);
3142 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3143 there are no nonzero bits of C outside of X's mode. */
3144 if ((GET_CODE (op0) == SIGN_EXTEND
3145 || GET_CODE (op0) == ZERO_EXTEND)
3146 && CONST_INT_P (trueop1)
3147 && HWI_COMPUTABLE_MODE_P (mode)
3148 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3149 & UINTVAL (trueop1)) == 0)
3151 machine_mode imode = GET_MODE (XEXP (op0, 0));
3152 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3153 gen_int_mode (INTVAL (trueop1),
3154 imode));
3155 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
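/* For example, (and (sign_extend:SI (reg:QI x)) (const_int 0x7c)) becomes
   (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7c))), since the mask
   keeps no bits outside QImode.  */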
3158 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3159 we might be able to further simplify the AND with X and potentially
3160 remove the truncation altogether. */
3161 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3163 rtx x = XEXP (op0, 0);
3164 machine_mode xmode = GET_MODE (x);
3165 tem = simplify_gen_binary (AND, xmode, x,
3166 gen_int_mode (INTVAL (trueop1), xmode));
3167 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3170 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3171 if (GET_CODE (op0) == IOR
3172 && CONST_INT_P (trueop1)
3173 && CONST_INT_P (XEXP (op0, 1)))
3175 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3176 return simplify_gen_binary (IOR, mode,
3177 simplify_gen_binary (AND, mode,
3178 XEXP (op0, 0), op1),
3179 gen_int_mode (tmp, mode));
3182 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3183 insn (and may simplify more). */
3184 if (GET_CODE (op0) == XOR
3185 && rtx_equal_p (XEXP (op0, 0), op1)
3186 && ! side_effects_p (op1))
3187 return simplify_gen_binary (AND, mode,
3188 simplify_gen_unary (NOT, mode,
3189 XEXP (op0, 1), mode),
3190 op1);
3192 if (GET_CODE (op0) == XOR
3193 && rtx_equal_p (XEXP (op0, 1), op1)
3194 && ! side_effects_p (op1))
3195 return simplify_gen_binary (AND, mode,
3196 simplify_gen_unary (NOT, mode,
3197 XEXP (op0, 0), mode),
3198 op1);
3200 /* Similarly for (~(A ^ B)) & A. */
3201 if (GET_CODE (op0) == NOT
3202 && GET_CODE (XEXP (op0, 0)) == XOR
3203 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3204 && ! side_effects_p (op1))
3205 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3207 if (GET_CODE (op0) == NOT
3208 && GET_CODE (XEXP (op0, 0)) == XOR
3209 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3210 && ! side_effects_p (op1))
3211 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3213 /* Convert (A | B) & A to A. */
3214 if (GET_CODE (op0) == IOR
3215 && (rtx_equal_p (XEXP (op0, 0), op1)
3216 || rtx_equal_p (XEXP (op0, 1), op1))
3217 && ! side_effects_p (XEXP (op0, 0))
3218 && ! side_effects_p (XEXP (op0, 1)))
3219 return op1;
3221 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3222 ((A & N) + B) & M -> (A + B) & M
3223 Similarly if (N & M) == 0,
3224 ((A | N) + B) & M -> (A + B) & M
3225 and for - instead of + and/or ^ instead of |.
3226 Also, if (N & M) == 0, then
3227 (A +- N) & M -> A & M. */
3228 if (CONST_INT_P (trueop1)
3229 && HWI_COMPUTABLE_MODE_P (mode)
3230 && ~UINTVAL (trueop1)
3231 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3232 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3234 rtx pmop[2];
3235 int which;
3237 pmop[0] = XEXP (op0, 0);
3238 pmop[1] = XEXP (op0, 1);
3240 if (CONST_INT_P (pmop[1])
3241 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3242 return simplify_gen_binary (AND, mode, pmop[0], op1);
3244 for (which = 0; which < 2; which++)
3246 tem = pmop[which];
3247 switch (GET_CODE (tem))
3249 case AND:
3250 if (CONST_INT_P (XEXP (tem, 1))
3251 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3252 == UINTVAL (trueop1))
3253 pmop[which] = XEXP (tem, 0);
3254 break;
3255 case IOR:
3256 case XOR:
3257 if (CONST_INT_P (XEXP (tem, 1))
3258 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3259 pmop[which] = XEXP (tem, 0);
3260 break;
3261 default:
3262 break;
3266 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3268 tem = simplify_gen_binary (GET_CODE (op0), mode,
3269 pmop[0], pmop[1]);
3270 return simplify_gen_binary (code, mode, tem, op1);
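/* For example, (and (plus (and a (const_int 255)) b) (const_int 15))
   simplifies to (and (plus a b) (const_int 15)): the low four bits of
   a & 255 are the same as those of a.  */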
3274 /* (and X (ior (not X) Y)) -> (and X Y) */
3275 if (GET_CODE (op1) == IOR
3276 && GET_CODE (XEXP (op1, 0)) == NOT
3277 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3278 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3280 /* (and (ior (not X) Y) X) -> (and X Y) */
3281 if (GET_CODE (op0) == IOR
3282 && GET_CODE (XEXP (op0, 0)) == NOT
3283 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3284 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3286 /* (and X (ior Y (not X))) -> (and X Y) */
3287 if (GET_CODE (op1) == IOR
3288 && GET_CODE (XEXP (op1, 1)) == NOT
3289 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3290 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3292 /* (and (ior Y (not X)) X) -> (and X Y) */
3293 if (GET_CODE (op0) == IOR
3294 && GET_CODE (XEXP (op0, 1)) == NOT
3295 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3296 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3298 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3299 if (tem)
3300 return tem;
3302 tem = simplify_associative_operation (code, mode, op0, op1);
3303 if (tem)
3304 return tem;
3305 break;
3307 case UDIV:
3308 /* 0/x is 0 (or x&0 if x has side-effects). */
3309 if (trueop0 == CONST0_RTX (mode)
3310 && !cfun->can_throw_non_call_exceptions)
3312 if (side_effects_p (op1))
3313 return simplify_gen_binary (AND, mode, op1, trueop0);
3314 return trueop0;
3316 /* x/1 is x. */
3317 if (trueop1 == CONST1_RTX (mode))
3319 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3320 if (tem)
3321 return tem;
3323 /* Convert divide by power of two into shift. */
3324 if (CONST_INT_P (trueop1)
3325 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3326 return simplify_gen_binary (LSHIFTRT, mode, op0,
3327 gen_int_shift_amount (mode, val));
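/* For example, (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */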
3328 break;
3330 case DIV:
3331 /* Handle floating point and integers separately. */
3332 if (SCALAR_FLOAT_MODE_P (mode))
3334 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3335 safe for modes with NaNs, since 0.0 / 0.0 will then be
3336 NaN rather than 0.0. Nor is it safe for modes with signed
3337 zeros, since dividing 0 by a negative number gives -0.0 */
3338 if (trueop0 == CONST0_RTX (mode)
3339 && !HONOR_NANS (mode)
3340 && !HONOR_SIGNED_ZEROS (mode)
3341 && ! side_effects_p (op1))
3342 return op0;
3343 /* x/1.0 is x. */
3344 if (trueop1 == CONST1_RTX (mode)
3345 && !HONOR_SNANS (mode))
3346 return op0;
3348 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3349 && trueop1 != CONST0_RTX (mode))
3351 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3353 /* x/-1.0 is -x. */
3354 if (real_equal (d1, &dconstm1)
3355 && !HONOR_SNANS (mode))
3356 return simplify_gen_unary (NEG, mode, op0, mode);
3358 /* Change FP division by a constant into multiplication.
3359 Only do this with -freciprocal-math. */
3360 if (flag_reciprocal_math
3361 && !real_equal (d1, &dconst0))
3363 REAL_VALUE_TYPE d;
3364 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3365 tem = const_double_from_real_value (d, mode);
3366 return simplify_gen_binary (MULT, mode, op0, tem);
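/* For example, with -freciprocal-math (div x 4.0) becomes (mult x 0.25);
   1/4 is exactly representable, so no accuracy is lost for this
   particular constant.  */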
3370 else if (SCALAR_INT_MODE_P (mode))
3372 /* 0/x is 0 (or x&0 if x has side-effects). */
3373 if (trueop0 == CONST0_RTX (mode)
3374 && !cfun->can_throw_non_call_exceptions)
3376 if (side_effects_p (op1))
3377 return simplify_gen_binary (AND, mode, op1, trueop0);
3378 return trueop0;
3380 /* x/1 is x. */
3381 if (trueop1 == CONST1_RTX (mode))
3383 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3384 if (tem)
3385 return tem;
3387 /* x/-1 is -x. */
3388 if (trueop1 == constm1_rtx)
3390 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3391 if (x)
3392 return simplify_gen_unary (NEG, mode, x, mode);
3395 break;
3397 case UMOD:
3398 /* 0%x is 0 (or x&0 if x has side-effects). */
3399 if (trueop0 == CONST0_RTX (mode))
3401 if (side_effects_p (op1))
3402 return simplify_gen_binary (AND, mode, op1, trueop0);
3403 return trueop0;
3405 /* x%1 is 0 (or x&0 if x has side-effects). */
3406 if (trueop1 == CONST1_RTX (mode))
3408 if (side_effects_p (op0))
3409 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3410 return CONST0_RTX (mode);
3412 /* Implement modulus by power of two as AND. */
3413 if (CONST_INT_P (trueop1)
3414 && exact_log2 (UINTVAL (trueop1)) > 0)
3415 return simplify_gen_binary (AND, mode, op0,
3416 gen_int_mode (UINTVAL (trueop1) - 1,
3417 mode));
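/* For example, (umod x (const_int 16)) becomes (and x (const_int 15)).  */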
3418 break;
3420 case MOD:
3421 /* 0%x is 0 (or x&0 if x has side-effects). */
3422 if (trueop0 == CONST0_RTX (mode))
3424 if (side_effects_p (op1))
3425 return simplify_gen_binary (AND, mode, op1, trueop0);
3426 return trueop0;
3428 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3429 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3431 if (side_effects_p (op0))
3432 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3433 return CONST0_RTX (mode);
3435 break;
3437 case ROTATERT:
3438 case ROTATE:
3439 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3440 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3441 bitsize - 1, use the other rotate direction with an amount of
3442 1 .. bitsize / 2 - 1 instead. */
3443 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3444 if (CONST_INT_P (trueop1)
3445 && IN_RANGE (INTVAL (trueop1),
3446 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3447 GET_MODE_UNIT_PRECISION (mode) - 1))
3449 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3450 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3451 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3452 mode, op0, new_amount_rtx);
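/* For example, in SImode (rotate x (const_int 31)) is rewritten as
   (rotatert x (const_int 1)), and (rotatert x (const_int 24)) as
   (rotate x (const_int 8)), assuming the target provides both rotate
   directions.  */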
3454 #endif
3455 /* FALLTHRU */
3456 case ASHIFTRT:
3457 if (trueop1 == CONST0_RTX (mode))
3458 return op0;
3459 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3460 return op0;
3461 /* Rotating ~0 always results in ~0. */
3462 if (CONST_INT_P (trueop0)
3463 && HWI_COMPUTABLE_MODE_P (mode)
3464 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3465 && ! side_effects_p (op1))
3466 return op0;
3468 canonicalize_shift:
3469 /* Given:
3470 scalar modes M1, M2
3471 scalar constants c1, c2
3472 size (M2) > size (M1)
3473 c1 == size (M2) - size (M1)
3474 optimize:
3475 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3476 <low_part>)
3477 (const_int <c2>))
3478 to:
3479 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3480 <low_part>). */
3481 if ((code == ASHIFTRT || code == LSHIFTRT)
3482 && is_a <scalar_int_mode> (mode, &int_mode)
3483 && SUBREG_P (op0)
3484 && CONST_INT_P (op1)
3485 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3486 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3487 &inner_mode)
3488 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3489 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3490 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3491 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3492 && subreg_lowpart_p (op0))
3494 rtx tmp = gen_int_shift_amount
3495 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
3496 tmp = simplify_gen_binary (code, inner_mode,
3497 XEXP (SUBREG_REG (op0), 0),
3498 tmp);
3499 return lowpart_subreg (int_mode, tmp, inner_mode);
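/* For example, with M2 = DImode and M1 = SImode (so c1 = 32),
   (lshiftrt:SI (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32))
   <low_part>) (const_int 5)) becomes
   (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 37)) <low_part>).  */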
3502 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3504 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3505 if (val != INTVAL (op1))
3506 return simplify_gen_binary (code, mode, op0,
3507 gen_int_shift_amount (mode, val));
3509 break;
3511 case ASHIFT:
3512 case SS_ASHIFT:
3513 case US_ASHIFT:
3514 if (trueop1 == CONST0_RTX (mode))
3515 return op0;
3516 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3517 return op0;
3518 goto canonicalize_shift;
3520 case LSHIFTRT:
3521 if (trueop1 == CONST0_RTX (mode))
3522 return op0;
3523 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3524 return op0;
3525 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3526 if (GET_CODE (op0) == CLZ
3527 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3528 && CONST_INT_P (trueop1)
3529 && STORE_FLAG_VALUE == 1
3530 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3532 unsigned HOST_WIDE_INT zero_val = 0;
3534 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3535 && zero_val == GET_MODE_PRECISION (inner_mode)
3536 && INTVAL (trueop1) == exact_log2 (zero_val))
3537 return simplify_gen_relational (EQ, mode, inner_mode,
3538 XEXP (op0, 0), const0_rtx);
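/* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields 32 for
   SImode, (lshiftrt (clz:SI x) (const_int 5)) becomes
   (eq:SI x (const_int 0)): only a CLZ result of 32 has bit 5 set.  */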
3540 goto canonicalize_shift;
3542 case SMIN:
3543 if (HWI_COMPUTABLE_MODE_P (mode)
3544 && mode_signbit_p (mode, trueop1)
3545 && ! side_effects_p (op0))
3546 return op1;
3547 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3548 return op0;
3549 tem = simplify_associative_operation (code, mode, op0, op1);
3550 if (tem)
3551 return tem;
3552 break;
3554 case SMAX:
3555 if (HWI_COMPUTABLE_MODE_P (mode)
3556 && CONST_INT_P (trueop1)
3557 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3558 && ! side_effects_p (op0))
3559 return op1;
3560 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3561 return op0;
3562 tem = simplify_associative_operation (code, mode, op0, op1);
3563 if (tem)
3564 return tem;
3565 break;
3567 case UMIN:
3568 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3569 return op1;
3570 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3571 return op0;
3572 tem = simplify_associative_operation (code, mode, op0, op1);
3573 if (tem)
3574 return tem;
3575 break;
3577 case UMAX:
3578 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3579 return op1;
3580 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3581 return op0;
3582 tem = simplify_associative_operation (code, mode, op0, op1);
3583 if (tem)
3584 return tem;
3585 break;
3587 case SS_PLUS:
3588 case US_PLUS:
3589 case SS_MINUS:
3590 case US_MINUS:
3591 case SS_MULT:
3592 case US_MULT:
3593 case SS_DIV:
3594 case US_DIV:
3595 /* ??? There are simplifications that can be done. */
3596 return 0;
3598 case VEC_SERIES:
3599 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3600 return gen_vec_duplicate (mode, op0);
3601 if (valid_for_const_vector_p (mode, op0)
3602 && valid_for_const_vector_p (mode, op1))
3603 return gen_const_vec_series (mode, op0, op1);
3604 return 0;
3606 case VEC_SELECT:
3607 if (!VECTOR_MODE_P (mode))
3609 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3610 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3611 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3612 gcc_assert (XVECLEN (trueop1, 0) == 1);
3613 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3615 if (vec_duplicate_p (trueop0, &elt0))
3616 return elt0;
3618 if (GET_CODE (trueop0) == CONST_VECTOR)
3619 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3620 (trueop1, 0, 0)));
3622 /* Extract a scalar element from a nested VEC_SELECT expression
3623 (with an optional nested VEC_CONCAT expression). Some targets
3624 (e.g. i386) extract a scalar element from a vector using a chain
3625 of nested VEC_SELECT expressions. When the input operand is a
3626 memory operand, this operation can be simplified to a simple
3627 scalar load from an offset memory address. */
3628 int n_elts;
3629 if (GET_CODE (trueop0) == VEC_SELECT
3630 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3631 .is_constant (&n_elts)))
3633 rtx op0 = XEXP (trueop0, 0);
3634 rtx op1 = XEXP (trueop0, 1);
3636 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3637 int elem;
3639 rtvec vec;
3640 rtx tmp_op, tmp;
3642 gcc_assert (GET_CODE (op1) == PARALLEL);
3643 gcc_assert (i < n_elts);
3646 /* Select the element pointed to by the nested selector. */
3646 elem = INTVAL (XVECEXP (op1, 0, i));
3648 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3649 if (GET_CODE (op0) == VEC_CONCAT)
3651 rtx op00 = XEXP (op0, 0);
3652 rtx op01 = XEXP (op0, 1);
3654 machine_mode mode00, mode01;
3655 int n_elts00, n_elts01;
3657 mode00 = GET_MODE (op00);
3658 mode01 = GET_MODE (op01);
3660 /* Find out the number of elements of each operand.
3661 Since the concatenated result has a constant number
3662 of elements, the operands must too. */
3663 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
3664 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
3666 gcc_assert (n_elts == n_elts00 + n_elts01);
3668 /* Select correct operand of VEC_CONCAT
3669 and adjust selector. */
3670 if (elem < n_elts01)
3671 tmp_op = op00;
3672 else
3674 tmp_op = op01;
3675 elem -= n_elts00;
3678 else
3679 tmp_op = op0;
3681 vec = rtvec_alloc (1);
3682 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3684 tmp = gen_rtx_fmt_ee (code, mode,
3685 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3686 return tmp;
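/* A small worked example of the nested-selection collapse above
   (illustrative modes): selecting element 1 of an inner selection of
   elements [2 3], i.e.
     (vec_select:SI (vec_select:V2SI (reg:V4SI r) (parallel [2 3]))
		    (parallel [1]))
   collapses to (vec_select:SI (reg:V4SI r) (parallel [3])); with a nested
   VEC_CONCAT the selector is first redirected to the matching operand.  */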
3689 else
3691 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3692 gcc_assert (GET_MODE_INNER (mode)
3693 == GET_MODE_INNER (GET_MODE (trueop0)));
3694 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3696 if (vec_duplicate_p (trueop0, &elt0))
3697 /* It doesn't matter which elements are selected by trueop1,
3698 because they are all the same. */
3699 return gen_vec_duplicate (mode, elt0);
3701 if (GET_CODE (trueop0) == CONST_VECTOR)
3703 unsigned n_elts = XVECLEN (trueop1, 0);
3704 rtvec v = rtvec_alloc (n_elts);
3705 unsigned int i;
3707 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
3708 for (i = 0; i < n_elts; i++)
3710 rtx x = XVECEXP (trueop1, 0, i);
3712 gcc_assert (CONST_INT_P (x));
3713 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3714 INTVAL (x));
3717 return gen_rtx_CONST_VECTOR (mode, v);
3720 /* Recognize the identity. */
3721 if (GET_MODE (trueop0) == mode)
3723 bool maybe_ident = true;
3724 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3726 rtx j = XVECEXP (trueop1, 0, i);
3727 if (!CONST_INT_P (j) || INTVAL (j) != i)
3729 maybe_ident = false;
3730 break;
3733 if (maybe_ident)
3734 return trueop0;
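/* E.g. (vec_select:V4SI (reg:V4SI x) (parallel [0 1 2 3])) is simply
   (reg:V4SI x), since the selector is the identity permutation.  */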
3737 /* If we build {a,b} then permute it, build the result directly. */
3738 if (XVECLEN (trueop1, 0) == 2
3739 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3740 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3741 && GET_CODE (trueop0) == VEC_CONCAT
3742 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3743 && GET_MODE (XEXP (trueop0, 0)) == mode
3744 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3745 && GET_MODE (XEXP (trueop0, 1)) == mode)
3747 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3748 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3749 rtx subop0, subop1;
3751 gcc_assert (i0 < 4 && i1 < 4);
3752 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3753 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3755 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
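/* For instance, with four SImode scalars a, b, c, d built up pairwise,
     (vec_select:V2SI (vec_concat:V4SI (vec_concat:V2SI a b)
				       (vec_concat:V2SI c d))
		      (parallel [2 1]))
   is rebuilt directly as (vec_concat:V2SI c b).  */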
3758 if (XVECLEN (trueop1, 0) == 2
3759 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3760 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3761 && GET_CODE (trueop0) == VEC_CONCAT
3762 && GET_MODE (trueop0) == mode)
3764 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3765 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3766 rtx subop0, subop1;
3768 gcc_assert (i0 < 2 && i1 < 2);
3769 subop0 = XEXP (trueop0, i0);
3770 subop1 = XEXP (trueop0, i1);
3772 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3775 /* If we select one half of a vec_concat, return that. */
3776 int l0, l1;
3777 if (GET_CODE (trueop0) == VEC_CONCAT
3778 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
3779 .is_constant (&l0))
3780 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
3781 .is_constant (&l1))
3782 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
3784 rtx subop0 = XEXP (trueop0, 0);
3785 rtx subop1 = XEXP (trueop0, 1);
3786 machine_mode mode0 = GET_MODE (subop0);
3787 machine_mode mode1 = GET_MODE (subop1);
3788 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3789 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
3791 bool success = true;
3792 for (int i = 1; i < l0; ++i)
3794 rtx j = XVECEXP (trueop1, 0, i);
3795 if (!CONST_INT_P (j) || INTVAL (j) != i)
3797 success = false;
3798 break;
3801 if (success)
3802 return subop0;
3804 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
3806 bool success = true;
3807 for (int i = 1; i < l1; ++i)
3809 rtx j = XVECEXP (trueop1, 0, i);
3810 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
3812 success = false;
3813 break;
3816 if (success)
3817 return subop1;
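/* E.g. (vec_select:V2SI (vec_concat:V4SI (reg:V2SI a) (reg:V2SI b))
			 (parallel [0 1]))
   is (reg:V2SI a), and with selector (parallel [2 3]) it is (reg:V2SI b),
   modulo the side-effect checks above.  */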
3822 if (XVECLEN (trueop1, 0) == 1
3823 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3824 && GET_CODE (trueop0) == VEC_CONCAT)
3826 rtx vec = trueop0;
3827 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3829 /* Try to find the element in the VEC_CONCAT. */
3830 while (GET_MODE (vec) != mode
3831 && GET_CODE (vec) == VEC_CONCAT)
3833 poly_int64 vec_size;
3835 if (CONST_INT_P (XEXP (vec, 0)))
3837 /* vec_concat of two const_ints doesn't make sense with
3838 respect to modes. */
3839 if (CONST_INT_P (XEXP (vec, 1)))
3840 return 0;
3842 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
3843 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
3845 else
3846 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3848 if (known_lt (offset, vec_size))
3849 vec = XEXP (vec, 0);
3850 else if (known_ge (offset, vec_size))
3852 offset -= vec_size;
3853 vec = XEXP (vec, 1);
3855 else
3856 break;
3857 vec = avoid_constant_pool_reference (vec);
3860 if (GET_MODE (vec) == mode)
3861 return vec;
3864 /* If we select elements in a vec_merge that all come from the same
3865 operand, select from that operand directly. */
3866 if (GET_CODE (op0) == VEC_MERGE)
3868 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
3869 if (CONST_INT_P (trueop02))
3871 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
3872 bool all_operand0 = true;
3873 bool all_operand1 = true;
3874 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3876 rtx j = XVECEXP (trueop1, 0, i);
3877 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
3878 all_operand1 = false;
3879 else
3880 all_operand0 = false;
3882 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
3883 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
3884 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
3885 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
3889 /* If we have two nested selects that are inverses of each
3890 other, replace them with the source operand. */
3891 if (GET_CODE (trueop0) == VEC_SELECT
3892 && GET_MODE (XEXP (trueop0, 0)) == mode)
3894 rtx op0_subop1 = XEXP (trueop0, 1);
3895 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
3896 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
3898 /* Apply the outer ordering vector to the inner one. (The inner
3899 ordering vector is expressly permitted to be of a different
3900 length than the outer one.) If the result is { 0, 1, ..., n-1 }
3901 then the two VEC_SELECTs cancel. */
3902 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
3904 rtx x = XVECEXP (trueop1, 0, i);
3905 if (!CONST_INT_P (x))
3906 return 0;
3907 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
3908 if (!CONST_INT_P (y) || i != INTVAL (y))
3909 return 0;
3911 return XEXP (trueop0, 0);
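/* E.g. two successive reversals cancel:
     (vec_select:V4SI (vec_select:V4SI (reg:V4SI x) (parallel [3 2 1 0]))
		      (parallel [3 2 1 0]))
   composes to the identity permutation and simplifies to (reg:V4SI x).  */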
3914 return 0;
3915 case VEC_CONCAT:
3917 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3918 ? GET_MODE (trueop0)
3919 : GET_MODE_INNER (mode));
3920 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3921 ? GET_MODE (trueop1)
3922 : GET_MODE_INNER (mode));
3924 gcc_assert (VECTOR_MODE_P (mode));
3925 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
3926 + GET_MODE_SIZE (op1_mode),
3927 GET_MODE_SIZE (mode)));
3929 if (VECTOR_MODE_P (op0_mode))
3930 gcc_assert (GET_MODE_INNER (mode)
3931 == GET_MODE_INNER (op0_mode));
3932 else
3933 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3935 if (VECTOR_MODE_P (op1_mode))
3936 gcc_assert (GET_MODE_INNER (mode)
3937 == GET_MODE_INNER (op1_mode));
3938 else
3939 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3941 unsigned int n_elts, in_n_elts;
3942 if ((GET_CODE (trueop0) == CONST_VECTOR
3943 || CONST_SCALAR_INT_P (trueop0)
3944 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
3945 && (GET_CODE (trueop1) == CONST_VECTOR
3946 || CONST_SCALAR_INT_P (trueop1)
3947 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
3948 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
3949 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
3951 rtvec v = rtvec_alloc (n_elts);
3952 unsigned int i;
3953 for (i = 0; i < n_elts; i++)
3955 if (i < in_n_elts)
3957 if (!VECTOR_MODE_P (op0_mode))
3958 RTVEC_ELT (v, i) = trueop0;
3959 else
3960 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3962 else
3964 if (!VECTOR_MODE_P (op1_mode))
3965 RTVEC_ELT (v, i) = trueop1;
3966 else
3967 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3968 i - in_n_elts);
3972 return gen_rtx_CONST_VECTOR (mode, v);
3975 /* Try to merge two VEC_SELECTs from the same vector into a single one.
3976 Restrict the transformation to avoid generating a VEC_SELECT with a
3977 mode unrelated to its operand. */
3978 if (GET_CODE (trueop0) == VEC_SELECT
3979 && GET_CODE (trueop1) == VEC_SELECT
3980 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
3981 && GET_MODE (XEXP (trueop0, 0)) == mode)
3983 rtx par0 = XEXP (trueop0, 1);
3984 rtx par1 = XEXP (trueop1, 1);
3985 int len0 = XVECLEN (par0, 0);
3986 int len1 = XVECLEN (par1, 0);
3987 rtvec vec = rtvec_alloc (len0 + len1);
3988 for (int i = 0; i < len0; i++)
3989 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
3990 for (int i = 0; i < len1; i++)
3991 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
3992 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
3993 gen_rtx_PARALLEL (VOIDmode, vec));
3996 return 0;
3998 default:
3999 gcc_unreachable ();
4002 if (mode == GET_MODE (op0)
4003 && mode == GET_MODE (op1)
4004 && vec_duplicate_p (op0, &elt0)
4005 && vec_duplicate_p (op1, &elt1))
4007 /* Try applying the operator to ELT and see if that simplifies.
4008 We can duplicate the result if so.
4010 The reason we don't use simplify_gen_binary is that it isn't
4011 necessarily a win to convert things like:
4013 (plus:V (vec_duplicate:V (reg:S R1))
4014 (vec_duplicate:V (reg:S R2)))
4018 (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4020 The first might be done entirely in vector registers while the
4021 second might need a move between register files. */
4022 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4023 elt0, elt1);
4024 if (tem)
4025 return gen_vec_duplicate (mode, tem);
4028 return 0;
4032 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4033 rtx op0, rtx op1)
4035 if (VECTOR_MODE_P (mode)
4036 && code != VEC_CONCAT
4037 && GET_CODE (op0) == CONST_VECTOR
4038 && GET_CODE (op1) == CONST_VECTOR)
4040 unsigned int n_elts;
4041 if (!CONST_VECTOR_NUNITS (op0).is_constant (&n_elts))
4042 return NULL_RTX;
4044 gcc_assert (known_eq (n_elts, CONST_VECTOR_NUNITS (op1)));
4045 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4046 rtvec v = rtvec_alloc (n_elts);
4047 unsigned int i;
4049 for (i = 0; i < n_elts; i++)
4051 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4052 CONST_VECTOR_ELT (op0, i),
4053 CONST_VECTOR_ELT (op1, i));
4054 if (!x || !valid_for_const_vector_p (mode, x))
4055 return 0;
4056 RTVEC_ELT (v, i) = x;
4059 return gen_rtx_CONST_VECTOR (mode, v);
4062 if (VECTOR_MODE_P (mode)
4063 && code == VEC_CONCAT
4064 && (CONST_SCALAR_INT_P (op0)
4065 || CONST_FIXED_P (op0)
4066 || CONST_DOUBLE_AS_FLOAT_P (op0))
4067 && (CONST_SCALAR_INT_P (op1)
4068 || CONST_DOUBLE_AS_FLOAT_P (op1)
4069 || CONST_FIXED_P (op1)))
4071 /* Both inputs have a constant number of elements, so the result
4072 must too. */
4073 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4074 rtvec v = rtvec_alloc (n_elts);
4076 gcc_assert (n_elts >= 2);
4077 if (n_elts == 2)
4079 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4080 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4082 RTVEC_ELT (v, 0) = op0;
4083 RTVEC_ELT (v, 1) = op1;
4085 else
4087 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4088 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4089 unsigned i;
4091 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4092 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4093 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4095 for (i = 0; i < op0_n_elts; ++i)
4096 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4097 for (i = 0; i < op1_n_elts; ++i)
4098 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4101 return gen_rtx_CONST_VECTOR (mode, v);
4104 if (SCALAR_FLOAT_MODE_P (mode)
4105 && CONST_DOUBLE_AS_FLOAT_P (op0)
4106 && CONST_DOUBLE_AS_FLOAT_P (op1)
4107 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4109 if (code == AND
4110 || code == IOR
4111 || code == XOR)
4113 long tmp0[4];
4114 long tmp1[4];
4115 REAL_VALUE_TYPE r;
4116 int i;
4118 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4119 GET_MODE (op0));
4120 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4121 GET_MODE (op1));
4122 for (i = 0; i < 4; i++)
4124 switch (code)
4126 case AND:
4127 tmp0[i] &= tmp1[i];
4128 break;
4129 case IOR:
4130 tmp0[i] |= tmp1[i];
4131 break;
4132 case XOR:
4133 tmp0[i] ^= tmp1[i];
4134 break;
4135 default:
4136 gcc_unreachable ();
4139 real_from_target (&r, tmp0, mode);
4140 return const_double_from_real_value (r, mode);
4142 else
4144 REAL_VALUE_TYPE f0, f1, value, result;
4145 const REAL_VALUE_TYPE *opr0, *opr1;
4146 bool inexact;
4148 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4149 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4151 if (HONOR_SNANS (mode)
4152 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4153 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4154 return 0;
4156 real_convert (&f0, mode, opr0);
4157 real_convert (&f1, mode, opr1);
4159 if (code == DIV
4160 && real_equal (&f1, &dconst0)
4161 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4162 return 0;
4164 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4165 && flag_trapping_math
4166 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4168 int s0 = REAL_VALUE_NEGATIVE (f0);
4169 int s1 = REAL_VALUE_NEGATIVE (f1);
4171 switch (code)
4173 case PLUS:
4174 /* Inf + -Inf = NaN plus exception. */
4175 if (s0 != s1)
4176 return 0;
4177 break;
4178 case MINUS:
4179 /* Inf - Inf = NaN plus exception. */
4180 if (s0 == s1)
4181 return 0;
4182 break;
4183 case DIV:
4184 /* Inf / Inf = NaN plus exception. */
4185 return 0;
4186 default:
4187 break;
4191 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4192 && flag_trapping_math
4193 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4194 || (REAL_VALUE_ISINF (f1)
4195 && real_equal (&f0, &dconst0))))
4196 /* Inf * 0 = NaN plus exception. */
4197 return 0;
4199 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4200 &f0, &f1);
4201 real_convert (&result, mode, &value);
4203 /* Don't constant fold this floating point operation if
4204 the result has overflowed and flag_trapping_math is set. */
4206 if (flag_trapping_math
4207 && MODE_HAS_INFINITIES (mode)
4208 && REAL_VALUE_ISINF (result)
4209 && !REAL_VALUE_ISINF (f0)
4210 && !REAL_VALUE_ISINF (f1))
4211 /* Overflow plus exception. */
4212 return 0;
4214 /* Don't constant fold this floating point operation if the
4215 result may depend upon the run-time rounding mode and
4216 flag_rounding_math is set, or if GCC's software emulation
4217 is unable to accurately represent the result. */
4219 if ((flag_rounding_math
4220 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4221 && (inexact || !real_identical (&result, &value)))
4222 return NULL_RTX;
4224 return const_double_from_real_value (result, mode);
4228 /* We can fold some multi-word operations. */
4229 scalar_int_mode int_mode;
4230 if (is_a <scalar_int_mode> (mode, &int_mode)
4231 && CONST_SCALAR_INT_P (op0)
4232 && CONST_SCALAR_INT_P (op1))
4234 wide_int result;
4235 bool overflow;
4236 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4237 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4239 #if TARGET_SUPPORTS_WIDE_INT == 0
4240 /* This assert keeps the simplification from producing a result
4241 that cannot be represented in a CONST_DOUBLE. A lot of
4242 upstream callers expect that this function never fails to
4243 simplify something, so if this check were added to the test
4244 above, the code would just die later anyway. If this assert
4245 triggers, you need to make the port support wide int. */
4246 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4247 #endif
4248 switch (code)
4250 case MINUS:
4251 result = wi::sub (pop0, pop1);
4252 break;
4254 case PLUS:
4255 result = wi::add (pop0, pop1);
4256 break;
4258 case MULT:
4259 result = wi::mul (pop0, pop1);
4260 break;
4262 case DIV:
4263 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4264 if (overflow)
4265 return NULL_RTX;
4266 break;
4268 case MOD:
4269 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4270 if (overflow)
4271 return NULL_RTX;
4272 break;
4274 case UDIV:
4275 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4276 if (overflow)
4277 return NULL_RTX;
4278 break;
4280 case UMOD:
4281 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4282 if (overflow)
4283 return NULL_RTX;
4284 break;
4286 case AND:
4287 result = wi::bit_and (pop0, pop1);
4288 break;
4290 case IOR:
4291 result = wi::bit_or (pop0, pop1);
4292 break;
4294 case XOR:
4295 result = wi::bit_xor (pop0, pop1);
4296 break;
4298 case SMIN:
4299 result = wi::smin (pop0, pop1);
4300 break;
4302 case SMAX:
4303 result = wi::smax (pop0, pop1);
4304 break;
4306 case UMIN:
4307 result = wi::umin (pop0, pop1);
4308 break;
4310 case UMAX:
4311 result = wi::umax (pop0, pop1);
4312 break;
4314 case LSHIFTRT:
4315 case ASHIFTRT:
4316 case ASHIFT:
4318 wide_int wop1 = pop1;
4319 if (SHIFT_COUNT_TRUNCATED)
4320 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
4321 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
4322 return NULL_RTX;
4324 switch (code)
4326 case LSHIFTRT:
4327 result = wi::lrshift (pop0, wop1);
4328 break;
4330 case ASHIFTRT:
4331 result = wi::arshift (pop0, wop1);
4332 break;
4334 case ASHIFT:
4335 result = wi::lshift (pop0, wop1);
4336 break;
4338 default:
4339 gcc_unreachable ();
4341 break;
4343 case ROTATE:
4344 case ROTATERT:
4346 if (wi::neg_p (pop1))
4347 return NULL_RTX;
4349 switch (code)
4351 case ROTATE:
4352 result = wi::lrotate (pop0, pop1);
4353 break;
4355 case ROTATERT:
4356 result = wi::rrotate (pop0, pop1);
4357 break;
4359 default:
4360 gcc_unreachable ();
4362 break;
4364 default:
4365 return NULL_RTX;
4367 return immed_wide_int_const (result, int_mode);
4370 /* Handle polynomial integers. */
4371 if (NUM_POLY_INT_COEFFS > 1
4372 && is_a <scalar_int_mode> (mode, &int_mode)
4373 && poly_int_rtx_p (op0)
4374 && poly_int_rtx_p (op1))
4376 poly_wide_int result;
4377 switch (code)
4379 case PLUS:
4380 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
4381 break;
4383 case MINUS:
4384 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
4385 break;
4387 case MULT:
4388 if (CONST_SCALAR_INT_P (op1))
4389 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
4390 else
4391 return NULL_RTX;
4392 break;
4394 case ASHIFT:
4395 if (CONST_SCALAR_INT_P (op1))
4397 wide_int shift = rtx_mode_t (op1, mode);
4398 if (SHIFT_COUNT_TRUNCATED)
4399 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
4400 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
4401 return NULL_RTX;
4402 result = wi::to_poly_wide (op0, mode) << shift;
4404 else
4405 return NULL_RTX;
4406 break;
4408 case IOR:
4409 if (!CONST_SCALAR_INT_P (op1)
4410 || !can_ior_p (wi::to_poly_wide (op0, mode),
4411 rtx_mode_t (op1, mode), &result))
4412 return NULL_RTX;
4413 break;
4415 default:
4416 return NULL_RTX;
4418 return immed_wide_int_const (result, int_mode);
4421 return NULL_RTX;
4426 /* Return a positive integer if X should sort after Y. The value
4427 returned is 1 if and only if X and Y are both regs. */
4429 static int
4430 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
4432 int result;
4434 result = (commutative_operand_precedence (y)
4435 - commutative_operand_precedence (x));
4436 if (result)
4437 return result + result;
4439 /* Group together equal REGs to do more simplification. */
4440 if (REG_P (x) && REG_P (y))
4441 return REGNO (x) > REGNO (y);
4443 return 0;
4446 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
4447 operands may be another PLUS or MINUS.
4449 Rather than test for specific cases, we do this by a brute-force method
4450 and do all possible simplifications until no more changes occur. Then
4451 we rebuild the operation.
4453 May return NULL_RTX when no changes were made. */
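/* For instance, (minus (plus (reg a) (const_int 5)) (neg (reg b))) is
   decomposed into the operand list { +a, +5, +b }; the pairwise loop and
   the final rebuild then produce roughly
   (plus (plus (reg a) (reg b)) (const_int 5)), modulo the exact canonical
   ordering chosen by the sort.  */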
4455 static rtx
4456 simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
4457 rtx op1)
4459 struct simplify_plus_minus_op_data
4461 rtx op;
4462 short neg;
4463 } ops[16];
4464 rtx result, tem;
4465 int n_ops = 2;
4466 int changed, n_constants, canonicalized = 0;
4467 int i, j;
4469 memset (ops, 0, sizeof ops);
4471 /* Set up the two operands and then expand them until nothing has been
4472 changed. If we run out of room in our array, give up; this should
4473 almost never happen. */
4475 ops[0].op = op0;
4476 ops[0].neg = 0;
4477 ops[1].op = op1;
4478 ops[1].neg = (code == MINUS);
4482 changed = 0;
4483 n_constants = 0;
4485 for (i = 0; i < n_ops; i++)
4487 rtx this_op = ops[i].op;
4488 int this_neg = ops[i].neg;
4489 enum rtx_code this_code = GET_CODE (this_op);
4491 switch (this_code)
4493 case PLUS:
4494 case MINUS:
4495 if (n_ops == ARRAY_SIZE (ops))
4496 return NULL_RTX;
4498 ops[n_ops].op = XEXP (this_op, 1);
4499 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
4500 n_ops++;
4502 ops[i].op = XEXP (this_op, 0);
4503 changed = 1;
4504 /* If this operand was negated then we will potentially
4505 canonicalize the expression. Similarly, if we don't
4506 place the operands adjacently, we are re-ordering the
4507 expression and thus might be performing a
4508 canonicalization. Ignore register re-ordering.
4509 ??? It might be better to shuffle the ops array here,
4510 but then (plus (plus (A, B), plus (C, D))) wouldn't
4511 be seen as non-canonical. */
4512 if (this_neg
4513 || (i != n_ops - 2
4514 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
4515 canonicalized = 1;
4516 break;
4518 case NEG:
4519 ops[i].op = XEXP (this_op, 0);
4520 ops[i].neg = ! this_neg;
4521 changed = 1;
4522 canonicalized = 1;
4523 break;
4525 case CONST:
4526 if (n_ops != ARRAY_SIZE (ops)
4527 && GET_CODE (XEXP (this_op, 0)) == PLUS
4528 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
4529 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
4531 ops[i].op = XEXP (XEXP (this_op, 0), 0);
4532 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
4533 ops[n_ops].neg = this_neg;
4534 n_ops++;
4535 changed = 1;
4536 canonicalized = 1;
4538 break;
4540 case NOT:
4541 /* ~a -> (-a - 1) */
4542 if (n_ops != ARRAY_SIZE (ops))
4544 ops[n_ops].op = CONSTM1_RTX (mode);
4545 ops[n_ops++].neg = this_neg;
4546 ops[i].op = XEXP (this_op, 0);
4547 ops[i].neg = !this_neg;
4548 changed = 1;
4549 canonicalized = 1;
4551 break;
4553 case CONST_INT:
4554 n_constants++;
4555 if (this_neg)
4557 ops[i].op = neg_const_int (mode, this_op);
4558 ops[i].neg = 0;
4559 changed = 1;
4560 canonicalized = 1;
4562 break;
4564 default:
4565 break;
4569 while (changed);
4571 if (n_constants > 1)
4572 canonicalized = 1;
4574 gcc_assert (n_ops >= 2);
4576 /* If we only have two operands, we can avoid the loops. */
4577 if (n_ops == 2)
4579 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
4580 rtx lhs, rhs;
4582 /* Get the two operands. Be careful with the order, especially for
4583 the cases where code == MINUS. */
4584 if (ops[0].neg && ops[1].neg)
4586 lhs = gen_rtx_NEG (mode, ops[0].op);
4587 rhs = ops[1].op;
4589 else if (ops[0].neg)
4591 lhs = ops[1].op;
4592 rhs = ops[0].op;
4594 else
4596 lhs = ops[0].op;
4597 rhs = ops[1].op;
4600 return simplify_const_binary_operation (code, mode, lhs, rhs);
4603 /* Now simplify each pair of operands until nothing changes. */
4604 while (1)
4606 /* Insertion sort is good enough for a small array. */
4607 for (i = 1; i < n_ops; i++)
4609 struct simplify_plus_minus_op_data save;
4610 int cmp;
4612 j = i - 1;
4613 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
4614 if (cmp <= 0)
4615 continue;
4616 /* Just swapping registers doesn't count as canonicalization. */
4617 if (cmp != 1)
4618 canonicalized = 1;
4620 save = ops[i];
4622 ops[j + 1] = ops[j];
4623 while (j--
4624 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
4625 ops[j + 1] = save;
4628 changed = 0;
4629 for (i = n_ops - 1; i > 0; i--)
4630 for (j = i - 1; j >= 0; j--)
4632 rtx lhs = ops[j].op, rhs = ops[i].op;
4633 int lneg = ops[j].neg, rneg = ops[i].neg;
4635 if (lhs != 0 && rhs != 0)
4637 enum rtx_code ncode = PLUS;
4639 if (lneg != rneg)
4641 ncode = MINUS;
4642 if (lneg)
4643 std::swap (lhs, rhs);
4645 else if (swap_commutative_operands_p (lhs, rhs))
4646 std::swap (lhs, rhs);
4648 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
4649 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
4651 rtx tem_lhs, tem_rhs;
4653 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
4654 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
4655 tem = simplify_binary_operation (ncode, mode, tem_lhs,
4656 tem_rhs);
4658 if (tem && !CONSTANT_P (tem))
4659 tem = gen_rtx_CONST (GET_MODE (tem), tem);
4661 else
4662 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4664 if (tem)
4666 /* Reject "simplifications" that just wrap the two
4667 arguments in a CONST. Failure to do so can result
4668 in infinite recursion with simplify_binary_operation
4669 when it calls us to simplify CONST operations.
4670 Also, if we find such a simplification, don't try
4671 any more combinations with this rhs: We must have
4672 something like symbol+offset, i.e. one of the
4673 trivial CONST expressions we handle later. */
4674 if (GET_CODE (tem) == CONST
4675 && GET_CODE (XEXP (tem, 0)) == ncode
4676 && XEXP (XEXP (tem, 0), 0) == lhs
4677 && XEXP (XEXP (tem, 0), 1) == rhs)
4678 break;
4679 lneg &= rneg;
4680 if (GET_CODE (tem) == NEG)
4681 tem = XEXP (tem, 0), lneg = !lneg;
4682 if (CONST_INT_P (tem) && lneg)
4683 tem = neg_const_int (mode, tem), lneg = 0;
4685 ops[i].op = tem;
4686 ops[i].neg = lneg;
4687 ops[j].op = NULL_RTX;
4688 changed = 1;
4689 canonicalized = 1;
4694 if (!changed)
4695 break;
4697 /* Pack all the operands to the lower-numbered entries. */
4698 for (i = 0, j = 0; j < n_ops; j++)
4699 if (ops[j].op)
4701 ops[i] = ops[j];
4702 i++;
4704 n_ops = i;
4707 /* If nothing changed, check whether rematerialization of the rtl
4708 instructions is still required. */
4709 if (!canonicalized)
4711 /* Perform rematerialization only if all operands are registers and
4712 all operations are PLUS. */
4713 /* ??? Also disallow (non-global, non-frame) fixed registers to work
4714 around rs6000 and how it uses the CA register. See PR67145. */
4715 for (i = 0; i < n_ops; i++)
4716 if (ops[i].neg
4717 || !REG_P (ops[i].op)
4718 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
4719 && fixed_regs[REGNO (ops[i].op)]
4720 && !global_regs[REGNO (ops[i].op)]
4721 && ops[i].op != frame_pointer_rtx
4722 && ops[i].op != arg_pointer_rtx
4723 && ops[i].op != stack_pointer_rtx))
4724 return NULL_RTX;
4725 goto gen_result;
4728 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4729 if (n_ops == 2
4730 && CONST_INT_P (ops[1].op)
4731 && CONSTANT_P (ops[0].op)
4732 && ops[0].neg)
4733 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
4735 /* We suppressed creation of trivial CONST expressions in the
4736 combination loop to avoid recursion. Create one manually now.
4737 The combination loop should have ensured that there is exactly
4738 one CONST_INT, and the sort will have ensured that it is last
4739 in the array and that any other constant will be next-to-last. */
4741 if (n_ops > 1
4742 && CONST_INT_P (ops[n_ops - 1].op)
4743 && CONSTANT_P (ops[n_ops - 2].op))
4745 rtx value = ops[n_ops - 1].op;
4746 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
4747 value = neg_const_int (mode, value);
4748 if (CONST_INT_P (value))
4750 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
4751 INTVAL (value));
4752 n_ops--;
4756 /* Put a non-negated operand first, if possible. */
4758 for (i = 0; i < n_ops && ops[i].neg; i++)
4759 continue;
4760 if (i == n_ops)
4761 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
4762 else if (i != 0)
4764 tem = ops[0].op;
4765 ops[0] = ops[i];
4766 ops[i].op = tem;
4767 ops[i].neg = 1;
4770 /* Now make the result by performing the requested operations. */
4771 gen_result:
4772 result = ops[0].op;
4773 for (i = 1; i < n_ops; i++)
4774 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
4775 mode, result, ops[i].op);
4777 return result;
4780 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4781 static bool
4782 plus_minus_operand_p (const_rtx x)
4784 return GET_CODE (x) == PLUS
4785 || GET_CODE (x) == MINUS
4786 || (GET_CODE (x) == CONST
4787 && GET_CODE (XEXP (x, 0)) == PLUS
4788 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
4789 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
4792 /* Like simplify_binary_operation except used for relational operators.
4793 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4794 not also be VOIDmode.
4796 CMP_MODE specifies the mode in which the comparison is done, so it is
4797 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4798 the operands or, if both are VOIDmode, the operands are compared in
4799 "infinite precision". */
4801 simplify_relational_operation (enum rtx_code code, machine_mode mode,
4802 machine_mode cmp_mode, rtx op0, rtx op1)
4804 rtx tem, trueop0, trueop1;
4806 if (cmp_mode == VOIDmode)
4807 cmp_mode = GET_MODE (op0);
4808 if (cmp_mode == VOIDmode)
4809 cmp_mode = GET_MODE (op1);
4811 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
4812 if (tem)
4814 if (SCALAR_FLOAT_MODE_P (mode))
4816 if (tem == const0_rtx)
4817 return CONST0_RTX (mode);
4818 #ifdef FLOAT_STORE_FLAG_VALUE
4820 REAL_VALUE_TYPE val;
4821 val = FLOAT_STORE_FLAG_VALUE (mode);
4822 return const_double_from_real_value (val, mode);
4824 #else
4825 return NULL_RTX;
4826 #endif
4828 if (VECTOR_MODE_P (mode))
4830 if (tem == const0_rtx)
4831 return CONST0_RTX (mode);
4832 #ifdef VECTOR_STORE_FLAG_VALUE
4834 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4835 if (val == NULL_RTX)
4836 return NULL_RTX;
4837 if (val == const1_rtx)
4838 return CONST1_RTX (mode);
4840 return gen_const_vec_duplicate (mode, val);
4842 #else
4843 return NULL_RTX;
4844 #endif
4847 return tem;
4850 /* For the following tests, ensure const0_rtx is op1. */
4851 if (swap_commutative_operands_p (op0, op1)
4852 || (op0 == const0_rtx && op1 != const0_rtx))
4853 std::swap (op0, op1), code = swap_condition (code);
4855 /* If op0 is a compare, extract the comparison arguments from it. */
4856 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4857 return simplify_gen_relational (code, mode, VOIDmode,
4858 XEXP (op0, 0), XEXP (op0, 1));
4860 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4861 || CC0_P (op0))
4862 return NULL_RTX;
4864 trueop0 = avoid_constant_pool_reference (op0);
4865 trueop1 = avoid_constant_pool_reference (op1);
4866 return simplify_relational_operation_1 (code, mode, cmp_mode,
4867 trueop0, trueop1);
4870 /* This part of simplify_relational_operation is only used when CMP_MODE
4871 is not in class MODE_CC (i.e. it is a real comparison).
4873 MODE is the mode of the result, while CMP_MODE specifies the mode in
4874 which the comparison is done, so it is the mode of the operands. */
4876 static rtx
4877 simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
4878 machine_mode cmp_mode, rtx op0, rtx op1)
4880 enum rtx_code op0code = GET_CODE (op0);
4882 if (op1 == const0_rtx && COMPARISON_P (op0))
4884 /* If op0 is a comparison, extract the comparison arguments
4885 from it. */
4886 if (code == NE)
4888 if (GET_MODE (op0) == mode)
4889 return simplify_rtx (op0);
4890 else
4891 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4892 XEXP (op0, 0), XEXP (op0, 1));
4894 else if (code == EQ)
4896 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
4897 if (new_code != UNKNOWN)
4898 return simplify_gen_relational (new_code, mode, VOIDmode,
4899 XEXP (op0, 0), XEXP (op0, 1));
4903 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4904 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4905 if ((code == LTU || code == GEU)
4906 && GET_CODE (op0) == PLUS
4907 && CONST_INT_P (XEXP (op0, 1))
4908 && (rtx_equal_p (op1, XEXP (op0, 0))
4909 || rtx_equal_p (op1, XEXP (op0, 1)))
4910 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
4911 && XEXP (op0, 1) != const0_rtx)
4913 rtx new_cmp
4914 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4915 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4916 cmp_mode, XEXP (op0, 0), new_cmp);
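/* This is the classic unsigned overflow check: e.g.
   (ltu:SI (plus:SI (reg:SI a) (const_int 7)) (const_int 7)), which tests
   whether the addition wrapped, becomes
   (geu:SI (reg:SI a) (const_int -7)).  */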
4919 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
4920 transformed into (LTU a -C). */
4921 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
4922 && CONST_INT_P (XEXP (op0, 1))
4923 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
4924 && XEXP (op0, 1) != const0_rtx)
4926 rtx new_cmp
4927 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4928 return simplify_gen_relational (LTU, mode, cmp_mode,
4929 XEXP (op0, 0), new_cmp);
4932 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4933 if ((code == LTU || code == GEU)
4934 && GET_CODE (op0) == PLUS
4935 && rtx_equal_p (op1, XEXP (op0, 1))
4936 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4937 && !rtx_equal_p (op1, XEXP (op0, 0)))
4938 return simplify_gen_relational (code, mode, cmp_mode, op0,
4939 copy_rtx (XEXP (op0, 0)));
4941 if (op1 == const0_rtx)
4943 /* Canonicalize (GTU x 0) as (NE x 0). */
4944 if (code == GTU)
4945 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4946 /* Canonicalize (LEU x 0) as (EQ x 0). */
4947 if (code == LEU)
4948 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4950 else if (op1 == const1_rtx)
4952 switch (code)
4954 case GE:
4955 /* Canonicalize (GE x 1) as (GT x 0). */
4956 return simplify_gen_relational (GT, mode, cmp_mode,
4957 op0, const0_rtx);
4958 case GEU:
4959 /* Canonicalize (GEU x 1) as (NE x 0). */
4960 return simplify_gen_relational (NE, mode, cmp_mode,
4961 op0, const0_rtx);
4962 case LT:
4963 /* Canonicalize (LT x 1) as (LE x 0). */
4964 return simplify_gen_relational (LE, mode, cmp_mode,
4965 op0, const0_rtx);
4966 case LTU:
4967 /* Canonicalize (LTU x 1) as (EQ x 0). */
4968 return simplify_gen_relational (EQ, mode, cmp_mode,
4969 op0, const0_rtx);
4970 default:
4971 break;
4974 else if (op1 == constm1_rtx)
4976 /* Canonicalize (LE x -1) as (LT x 0). */
4977 if (code == LE)
4978 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4979 /* Canonicalize (GT x -1) as (GE x 0). */
4980 if (code == GT)
4981 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4984 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4985 if ((code == EQ || code == NE)
4986 && (op0code == PLUS || op0code == MINUS)
4987 && CONSTANT_P (op1)
4988 && CONSTANT_P (XEXP (op0, 1))
4989 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4991 rtx x = XEXP (op0, 0);
4992 rtx c = XEXP (op0, 1);
4993 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
4994 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
4996 /* Detect an infinite recursive condition, where we oscillate at this
4997 simplification case between:
4998 A + B == C <---> C - B == A,
4999 where A, B, and C are all constants with non-simplifiable expressions,
5000 usually SYMBOL_REFs. */
5001 if (GET_CODE (tem) == invcode
5002 && CONSTANT_P (x)
5003 && rtx_equal_p (c, XEXP (tem, 1)))
5004 return NULL_RTX;
5006 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5009 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5010 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5011 scalar_int_mode int_mode, int_cmp_mode;
5012 if (code == NE
5013 && op1 == const0_rtx
5014 && is_int_mode (mode, &int_mode)
5015 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5016 /* ??? Work-around BImode bugs in the ia64 backend. */
5017 && int_mode != BImode
5018 && int_cmp_mode != BImode
5019 && nonzero_bits (op0, int_cmp_mode) == 1
5020 && STORE_FLAG_VALUE == 1)
5021 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5022 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5023 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5025 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5026 if ((code == EQ || code == NE)
5027 && op1 == const0_rtx
5028 && op0code == XOR)
5029 return simplify_gen_relational (code, mode, cmp_mode,
5030 XEXP (op0, 0), XEXP (op0, 1));
5032 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5033 if ((code == EQ || code == NE)
5034 && op0code == XOR
5035 && rtx_equal_p (XEXP (op0, 0), op1)
5036 && !side_effects_p (XEXP (op0, 0)))
5037 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5038 CONST0_RTX (mode));
5040 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5041 if ((code == EQ || code == NE)
5042 && op0code == XOR
5043 && rtx_equal_p (XEXP (op0, 1), op1)
5044 && !side_effects_p (XEXP (op0, 1)))
5045 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5046 CONST0_RTX (mode));
5048 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
5049 if ((code == EQ || code == NE)
5050 && op0code == XOR
5051 && CONST_SCALAR_INT_P (op1)
5052 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5053 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5054 simplify_gen_binary (XOR, cmp_mode,
5055 XEXP (op0, 1), op1));
5057 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction,
5058 or for constant folding if x/y is a constant. */
5059 if ((code == EQ || code == NE)
5060 && (op0code == AND || op0code == IOR)
5061 && !side_effects_p (op1)
5062 && op1 != CONST0_RTX (cmp_mode))
5064 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5065 (eq/ne (and (not y) x) 0). */
5066 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5067 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5069 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5070 cmp_mode);
5071 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5073 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5074 CONST0_RTX (cmp_mode));
5077 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5078 (eq/ne (and (not x) y) 0). */
5079 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5080 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5082 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5083 cmp_mode);
5084 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5086 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5087 CONST0_RTX (cmp_mode));
5091 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
5092 if ((code == EQ || code == NE)
5093 && GET_CODE (op0) == BSWAP
5094 && CONST_SCALAR_INT_P (op1))
5095 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5096 simplify_gen_unary (BSWAP, cmp_mode,
5097 op1, cmp_mode));
5099 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5100 if ((code == EQ || code == NE)
5101 && GET_CODE (op0) == BSWAP
5102 && GET_CODE (op1) == BSWAP)
5103 return simplify_gen_relational (code, mode, cmp_mode,
5104 XEXP (op0, 0), XEXP (op1, 0));
5106 if (op0code == POPCOUNT && op1 == const0_rtx)
5107 switch (code)
5109 case EQ:
5110 case LE:
5111 case LEU:
5112 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5113 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5114 XEXP (op0, 0), const0_rtx);
5116 case NE:
5117 case GT:
5118 case GTU:
5119 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5120 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5121 XEXP (op0, 0), const0_rtx);
5123 default:
5124 break;
5127 return NULL_RTX;
5130 enum
5132 CMP_EQ = 1,
5133 CMP_LT = 2,
5134 CMP_GT = 4,
5135 CMP_LTU = 8,
5136 CMP_GTU = 16
5140 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5141 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
5142 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
5143 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5144 For floating-point comparisons, assume that the operands were ordered. */
5146 static rtx
5147 comparison_result (enum rtx_code code, int known_results)
5149 switch (code)
5151 case EQ:
5152 case UNEQ:
5153 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5154 case NE:
5155 case LTGT:
5156 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5158 case LT:
5159 case UNLT:
5160 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5161 case GE:
5162 case UNGE:
5163 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5165 case GT:
5166 case UNGT:
5167 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5168 case LE:
5169 case UNLE:
5170 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5172 case LTU:
5173 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5174 case GEU:
5175 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5177 case GTU:
5178 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5179 case LEU:
5180 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5182 case ORDERED:
5183 return const_true_rtx;
5184 case UNORDERED:
5185 return const0_rtx;
5186 default:
5187 gcc_unreachable ();
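/* For example, when the caller has determined that op0 == -1 and op1 == 1,
   it passes CMP_LT | CMP_GTU (less than when signed, greater when unsigned);
   comparison_result then yields const_true_rtx for LT and GTU, and
   const0_rtx for GE, LEU and EQ.  */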
5191 /* Check if the given comparison (done in the given MODE) is actually
5192 a tautology or a contradiction. If the mode is VOIDmode, the
5193 comparison is done in "infinite precision". If no simplification
5194 is possible, this function returns zero. Otherwise, it returns
5195 either const_true_rtx or const0_rtx. */
5198 simplify_const_relational_operation (enum rtx_code code,
5199 machine_mode mode,
5200 rtx op0, rtx op1)
5202 rtx tem;
5203 rtx trueop0;
5204 rtx trueop1;
5206 gcc_assert (mode != VOIDmode
5207 || (GET_MODE (op0) == VOIDmode
5208 && GET_MODE (op1) == VOIDmode));
5210 /* If op0 is a compare, extract the comparison arguments from it. */
5211 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5213 op1 = XEXP (op0, 1);
5214 op0 = XEXP (op0, 0);
5216 if (GET_MODE (op0) != VOIDmode)
5217 mode = GET_MODE (op0);
5218 else if (GET_MODE (op1) != VOIDmode)
5219 mode = GET_MODE (op1);
5220 else
5221 return 0;
5224 /* We can't simplify MODE_CC values since we don't know what the
5225 actual comparison is. */
5226 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
5227 return 0;
5229 /* Make sure the constant is second. */
5230 if (swap_commutative_operands_p (op0, op1))
5232 std::swap (op0, op1);
5233 code = swap_condition (code);
5236 trueop0 = avoid_constant_pool_reference (op0);
5237 trueop1 = avoid_constant_pool_reference (op1);
5239 /* For integer comparisons of A and B maybe we can simplify A - B and can
5240 then simplify a comparison of that with zero. If A and B are both either
5241 a register or a CONST_INT, this can't help; testing for these cases will
5242 prevent infinite recursion here and speed things up.
5244 We can only do this for EQ and NE comparisons, as otherwise we may
5245 lose or introduce overflow that we cannot disregard as undefined,
5246 since we do not know the signedness of the operation on either the
5247 left or the right hand side of the comparison. */
5249 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5250 && (code == EQ || code == NE)
5251 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5252 && (REG_P (op1) || CONST_INT_P (trueop1)))
5253 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5254 /* We cannot do this if tem is a nonzero address. */
5255 && ! nonzero_address_p (tem))
5256 return simplify_const_relational_operation (signed_condition (code),
5257 mode, tem, const0_rtx);
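/* E.g. comparing (plus:SI (reg:SI x) (const_int 2)) with
   (plus:SI (reg:SI x) (const_int 3)) for EQ: the MINUS simplifies to
   (const_int -1), and comparing that constant against zero folds the whole
   comparison to const0_rtx.  */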
5259 if (! HONOR_NANS (mode) && code == ORDERED)
5260 return const_true_rtx;
5262 if (! HONOR_NANS (mode) && code == UNORDERED)
5263 return const0_rtx;
5265 /* For modes without NaNs, if the two operands are equal, we know the
5266 result except if they have side-effects. Even with NaNs we know
5267 the result of unordered comparisons and, if signaling NaNs are
5268 irrelevant, also the result of LT/GT/LTGT. */
5269 if ((! HONOR_NANS (trueop0)
5270 || code == UNEQ || code == UNLE || code == UNGE
5271 || ((code == LT || code == GT || code == LTGT)
5272 && ! HONOR_SNANS (trueop0)))
5273 && rtx_equal_p (trueop0, trueop1)
5274 && ! side_effects_p (trueop0))
5275 return comparison_result (code, CMP_EQ);
5277 /* If the operands are floating-point constants, see if we can fold
5278 the result. */
5279 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
5280 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
5281 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
5283 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
5284 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
5286 /* Comparisons are unordered iff at least one of the values is NaN. */
5287 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
5288 switch (code)
5290 case UNEQ:
5291 case UNLT:
5292 case UNGT:
5293 case UNLE:
5294 case UNGE:
5295 case NE:
5296 case UNORDERED:
5297 return const_true_rtx;
5298 case EQ:
5299 case LT:
5300 case GT:
5301 case LE:
5302 case GE:
5303 case LTGT:
5304 case ORDERED:
5305 return const0_rtx;
5306 default:
5307 return 0;
5310 return comparison_result (code,
5311 (real_equal (d0, d1) ? CMP_EQ :
5312 real_less (d0, d1) ? CMP_LT : CMP_GT));
5315 /* Otherwise, see if the operands are both integers. */
5316 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
5317 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
5319 /* It would be nice if we really had a mode here. However, the
5320 largest int representable on the target is as good as
5321 infinite. */
5322 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
5323 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
5324 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
5326 if (wi::eq_p (ptrueop0, ptrueop1))
5327 return comparison_result (code, CMP_EQ);
5328 else
5330 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
5331 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
5332 return comparison_result (code, cr);
5336 /* Optimize comparisons with upper and lower bounds. */
5337 scalar_int_mode int_mode;
5338 if (CONST_INT_P (trueop1)
5339 && is_a <scalar_int_mode> (mode, &int_mode)
5340 && HWI_COMPUTABLE_MODE_P (int_mode)
5341 && !side_effects_p (trueop0))
5343 int sign;
5344 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
5345 HOST_WIDE_INT val = INTVAL (trueop1);
5346 HOST_WIDE_INT mmin, mmax;
5348 if (code == GEU
5349 || code == LEU
5350 || code == GTU
5351 || code == LTU)
5352 sign = 0;
5353 else
5354 sign = 1;
5356 /* Get a reduced range if the sign bit is zero. */
5357 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
5359 mmin = 0;
5360 mmax = nonzero;
5362 else
5364 rtx mmin_rtx, mmax_rtx;
5365 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
5367 mmin = INTVAL (mmin_rtx);
5368 mmax = INTVAL (mmax_rtx);
5369 if (sign)
5371 unsigned int sign_copies
5372 = num_sign_bit_copies (trueop0, int_mode);
5374 mmin >>= (sign_copies - 1);
5375 mmax >>= (sign_copies - 1);
5379 switch (code)
5381 /* x >= y is always true for y <= mmin, always false for y > mmax. */
5382 case GEU:
5383 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5384 return const_true_rtx;
5385 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5386 return const0_rtx;
5387 break;
5388 case GE:
5389 if (val <= mmin)
5390 return const_true_rtx;
5391 if (val > mmax)
5392 return const0_rtx;
5393 break;
5395 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5396 case LEU:
5397 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5398 return const_true_rtx;
5399 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5400 return const0_rtx;
5401 break;
5402 case LE:
5403 if (val >= mmax)
5404 return const_true_rtx;
5405 if (val < mmin)
5406 return const0_rtx;
5407 break;
5409 case EQ:
5410 /* x == y is always false for y out of range. */
5411 if (val < mmin || val > mmax)
5412 return const0_rtx;
5413 break;
5415 /* x > y is always false for y >= mmax, always true for y < mmin. */
5416 case GTU:
5417 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
5418 return const0_rtx;
5419 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
5420 return const_true_rtx;
5421 break;
5422 case GT:
5423 if (val >= mmax)
5424 return const0_rtx;
5425 if (val < mmin)
5426 return const_true_rtx;
5427 break;
5429 /* x < y is always false for y <= mmin, always true for y > mmax. */
5430 case LTU:
5431 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
5432 return const0_rtx;
5433 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
5434 return const_true_rtx;
5435 break;
5436 case LT:
5437 if (val <= mmin)
5438 return const0_rtx;
5439 if (val > mmax)
5440 return const_true_rtx;
5441 break;
5443 case NE:
5444 /* x != y is always true for y out of range. */
5445 if (val < mmin || val > mmax)
5446 return const_true_rtx;
5447 break;
5449 default:
5450 break;
5454 /* Optimize integer comparisons with zero. */
5455 if (is_a <scalar_int_mode> (mode, &int_mode)
5456 && trueop1 == const0_rtx
5457 && !side_effects_p (trueop0))
5459 /* Some addresses are known to be nonzero. We don't know
5460 their sign, but equality comparisons are known. */
5461 if (nonzero_address_p (trueop0))
5463 if (code == EQ || code == LEU)
5464 return const0_rtx;
5465 if (code == NE || code == GTU)
5466 return const_true_rtx;
5469 /* See if the first operand is an IOR with a constant. If so, we
5470 may be able to determine the result of this comparison. */
5471 if (GET_CODE (op0) == IOR)
5473 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
5474 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
5476 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
5477 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
5478 && (UINTVAL (inner_const)
5479 & (HOST_WIDE_INT_1U
5480 << sign_bitnum)));
5482 switch (code)
5484 case EQ:
5485 case LEU:
5486 return const0_rtx;
5487 case NE:
5488 case GTU:
5489 return const_true_rtx;
5490 case LT:
5491 case LE:
5492 if (has_sign)
5493 return const_true_rtx;
5494 break;
5495 case GT:
5496 case GE:
5497 if (has_sign)
5498 return const0_rtx;
5499 break;
5500 default:
5501 break;
5507 /* Optimize comparison of ABS with zero. */
5508 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
5509 && (GET_CODE (trueop0) == ABS
5510 || (GET_CODE (trueop0) == FLOAT_EXTEND
5511 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
5513 switch (code)
5515 case LT:
5516 /* Optimize abs(x) < 0.0. */
5517 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
5518 return const0_rtx;
5519 break;
5521 case GE:
5522 /* Optimize abs(x) >= 0.0. */
5523 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
5524 return const_true_rtx;
5525 break;
5527 case UNGE:
5528 /* Optimize ! (abs(x) < 0.0). */
5529 return const_true_rtx;
5531 default:
5532 break;
5536 return 0;
5539 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
5540 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
5541 or CTZ_DEFINED_VALUE_AT_ZERO respectively, and return OP (X) if the expression
5542 can be simplified to that, or NULL_RTX if not.
5543 Assume X is compared against zero with CMP_CODE and the true
5544 arm is TRUE_VAL and the false arm is FALSE_VAL. */
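/* E.g. on a target whose CLZ_DEFINED_VALUE_AT_ZERO yields 32 for SImode,
   (X == 0) ? 32 : (clz:SI X) can be replaced by (clz:SI X) alone, since
   the defined value at zero already matches the constant arm.  */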
5546 static rtx
5547 simplify_cond_clz_ctz (rtx x, rtx_code cmp_code, rtx true_val, rtx false_val)
5549 if (cmp_code != EQ && cmp_code != NE)
5550 return NULL_RTX;
5552 /* Result on X == 0 and X !=0 respectively. */
5553 rtx on_zero, on_nonzero;
5554 if (cmp_code == EQ)
5556 on_zero = true_val;
5557 on_nonzero = false_val;
5559 else
5561 on_zero = false_val;
5562 on_nonzero = true_val;
5565 rtx_code op_code = GET_CODE (on_nonzero);
5566 if ((op_code != CLZ && op_code != CTZ)
5567 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
5568 || !CONST_INT_P (on_zero))
5569 return NULL_RTX;
5571 HOST_WIDE_INT op_val;
5572 scalar_int_mode mode ATTRIBUTE_UNUSED
5573 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
5574 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
5575 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
5576 && op_val == INTVAL (on_zero))
5577 return on_nonzero;
5579 return NULL_RTX;
5583 /* Simplify CODE, an operation with result mode MODE and three operands,
5584 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5585 a constant. Return 0 if no simplification is possible. */
5588 simplify_ternary_operation (enum rtx_code code, machine_mode mode,
5589 machine_mode op0_mode, rtx op0, rtx op1,
5590 rtx op2)
5592 bool any_change = false;
5593 rtx tem, trueop2;
5594 scalar_int_mode int_mode, int_op0_mode;
5595 unsigned int n_elts;
5597 switch (code)
5599 case FMA:
5600 /* Simplify negations around the multiplication. */
5601 /* -a * -b + c => a * b + c. */
5602 if (GET_CODE (op0) == NEG)
5604 tem = simplify_unary_operation (NEG, mode, op1, mode);
5605 if (tem)
5606 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
5608 else if (GET_CODE (op1) == NEG)
5610 tem = simplify_unary_operation (NEG, mode, op0, mode);
5611 if (tem)
5612 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
5615 /* Canonicalize the two multiplication operands. */
5616 /* a * -b + c => -b * a + c. */
5617 if (swap_commutative_operands_p (op0, op1))
5618 std::swap (op0, op1), any_change = true;
5620 if (any_change)
5621 return gen_rtx_FMA (mode, op0, op1, op2);
5622 return NULL_RTX;
5624 case SIGN_EXTRACT:
5625 case ZERO_EXTRACT:
5626 if (CONST_INT_P (op0)
5627 && CONST_INT_P (op1)
5628 && CONST_INT_P (op2)
5629 && is_a <scalar_int_mode> (mode, &int_mode)
5630 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
5631 && HWI_COMPUTABLE_MODE_P (int_mode))
5633 /* Extracting a bit-field from a constant. */
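/* Worked example, assuming !BITS_BIG_ENDIAN: extracting a 4-bit field
   starting at bit 4 from (const_int 0xa5) shifts the value right by 4,
   giving 0xa, and then masks it to 4 bits.  For SIGN_EXTRACT the top bit
   of the field (set here) is also propagated upwards, giving -6.  */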
5634 unsigned HOST_WIDE_INT val = UINTVAL (op0);
5635 HOST_WIDE_INT op1val = INTVAL (op1);
5636 HOST_WIDE_INT op2val = INTVAL (op2);
5637 if (!BITS_BIG_ENDIAN)
5638 val >>= op2val;
5639 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
5640 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
5641 else
5642 /* Not enough information to calculate the bit position. */
5643 break;
5645 if (HOST_BITS_PER_WIDE_INT != op1val)
5647 /* First zero-extend. */
5648 val &= (HOST_WIDE_INT_1U << op1val) - 1;
5649 /* If desired, propagate sign bit. */
5650 if (code == SIGN_EXTRACT
5651 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
5652 != 0)
5653 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
5656 return gen_int_mode (val, int_mode);
5658 break;
5660 case IF_THEN_ELSE:
5661 if (CONST_INT_P (op0))
5662 return op0 != const0_rtx ? op1 : op2;
5664 /* Convert c ? a : a into "a". */
5665 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
5666 return op1;
5668 /* Convert a != b ? a : b into "a". */
5669 if (GET_CODE (op0) == NE
5670 && ! side_effects_p (op0)
5671 && ! HONOR_NANS (mode)
5672 && ! HONOR_SIGNED_ZEROS (mode)
5673 && ((rtx_equal_p (XEXP (op0, 0), op1)
5674 && rtx_equal_p (XEXP (op0, 1), op2))
5675 || (rtx_equal_p (XEXP (op0, 0), op2)
5676 && rtx_equal_p (XEXP (op0, 1), op1))))
5677 return op1;
5679 /* Convert a == b ? a : b into "b". */
5680 if (GET_CODE (op0) == EQ
5681 && ! side_effects_p (op0)
5682 && ! HONOR_NANS (mode)
5683 && ! HONOR_SIGNED_ZEROS (mode)
5684 && ((rtx_equal_p (XEXP (op0, 0), op1)
5685 && rtx_equal_p (XEXP (op0, 1), op2))
5686 || (rtx_equal_p (XEXP (op0, 0), op2)
5687 && rtx_equal_p (XEXP (op0, 1), op1))))
5688 return op2;
5690 /* Convert (!c) != {0,...,0} ? a : b into
5691 c != {0,...,0} ? b : a for vector modes. */
5692 if (VECTOR_MODE_P (GET_MODE (op1))
5693 && GET_CODE (op0) == NE
5694 && GET_CODE (XEXP (op0, 0)) == NOT
5695 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
5697 rtx cv = XEXP (op0, 1);
5698 int nunits;
5699 bool ok = true;
5700 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
5701 ok = false;
5702 else
5703 for (int i = 0; i < nunits; ++i)
5704 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
5706 ok = false;
5707 break;
5709 if (ok)
5711 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
5712 XEXP (XEXP (op0, 0), 0),
5713 XEXP (op0, 1));
5714 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
5715 return retval;
5719 /* Convert x == 0 ? N : clz (x) into clz (x) when
5720 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
5721 Similarly for ctz (x). */
5722 if (COMPARISON_P (op0) && !side_effects_p (op0)
5723 && XEXP (op0, 1) == const0_rtx)
5725 rtx simplified
5726 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
5727 op1, op2);
5728 if (simplified)
5729 return simplified;
5732 if (COMPARISON_P (op0) && ! side_effects_p (op0))
5734 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
5735 ? GET_MODE (XEXP (op0, 1))
5736 : GET_MODE (XEXP (op0, 0)));
5737 rtx temp;
5739 /* Look for happy constants in op1 and op2. */
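/* If the arms are exactly STORE_FLAG_VALUE and zero, the IF_THEN_ELSE is
   just the comparison OP0 itself (or its reverse when the arms are
   swapped), since that is exactly what a comparison yields in MODE.  */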
5740 if (CONST_INT_P (op1) && CONST_INT_P (op2))
5742 HOST_WIDE_INT t = INTVAL (op1);
5743 HOST_WIDE_INT f = INTVAL (op2);
5745 if (t == STORE_FLAG_VALUE && f == 0)
5746 code = GET_CODE (op0);
5747 else if (t == 0 && f == STORE_FLAG_VALUE)
5749 enum rtx_code tmp;
5750 tmp = reversed_comparison_code (op0, NULL);
5751 if (tmp == UNKNOWN)
5752 break;
5753 code = tmp;
5755 else
5756 break;
5758 return simplify_gen_relational (code, mode, cmp_mode,
5759 XEXP (op0, 0), XEXP (op0, 1));
5762 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
5763 cmp_mode, XEXP (op0, 0),
5764 XEXP (op0, 1));
5766 /* See if any simplifications were possible. */
5767 if (temp)
5769 if (CONST_INT_P (temp))
5770 return temp == const0_rtx ? op2 : op1;
5771 else
5772 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
5775 break;
5777 case VEC_MERGE:
5778 gcc_assert (GET_MODE (op0) == mode);
5779 gcc_assert (GET_MODE (op1) == mode);
5780 gcc_assert (VECTOR_MODE_P (mode));
5781 trueop2 = avoid_constant_pool_reference (op2);
5782 if (CONST_INT_P (trueop2)
5783 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
5785 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
5786 unsigned HOST_WIDE_INT mask;
5787 if (n_elts == HOST_BITS_PER_WIDE_INT)
5788 mask = -1;
5789 else
5790 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
5792 if (!(sel & mask) && !side_effects_p (op0))
5793 return op1;
5794 if ((sel & mask) == mask && !side_effects_p (op1))
5795 return op0;
5797 rtx trueop0 = avoid_constant_pool_reference (op0);
5798 rtx trueop1 = avoid_constant_pool_reference (op1);
5799 if (GET_CODE (trueop0) == CONST_VECTOR
5800 && GET_CODE (trueop1) == CONST_VECTOR)
5802 rtvec v = rtvec_alloc (n_elts);
5803 unsigned int i;
5805 for (i = 0; i < n_elts; i++)
5806 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
5807 ? CONST_VECTOR_ELT (trueop0, i)
5808 : CONST_VECTOR_ELT (trueop1, i));
5809 return gen_rtx_CONST_VECTOR (mode, v);
5812 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
5813 if no element from a appears in the result. */
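/* In (vec_merge X Y SEL) a set bit in SEL selects an element from X and a
   clear bit selects it from Y, so elements of A survive only where both N
   and M have a bit set.  For example, with four elements, M = 0xc (binary
   1100) and N = 0x3 (binary 0011), no bit is set in both masks and the
   expression is equivalent to (vec_merge B C N).  The symmetric check
   below handles the case where no element of B survives instead.  */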
5814 if (GET_CODE (op0) == VEC_MERGE)
5816 tem = avoid_constant_pool_reference (XEXP (op0, 2));
5817 if (CONST_INT_P (tem))
5819 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
5820 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
5821 return simplify_gen_ternary (code, mode, mode,
5822 XEXP (op0, 1), op1, op2);
5823 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
5824 return simplify_gen_ternary (code, mode, mode,
5825 XEXP (op0, 0), op1, op2);
5828 if (GET_CODE (op1) == VEC_MERGE)
5830 tem = avoid_constant_pool_reference (XEXP (op1, 2));
5831 if (CONST_INT_P (tem))
5833 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
5834 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
5835 return simplify_gen_ternary (code, mode, mode,
5836 op0, XEXP (op1, 1), op2);
5837 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
5838 return simplify_gen_ternary (code, mode, mode,
5839 op0, XEXP (op1, 0), op2);
5843 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
5844 with a. */
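/* The vec_select/vec_duplicate pair broadcasts element I of A, and the
   merge then writes that value back into lane I of A itself, so the whole
   expression is simply A (returned below as OP1).  */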
5845 if (GET_CODE (op0) == VEC_DUPLICATE
5846 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
5847 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
5848 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
5850 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
5851 if (CONST_INT_P (tem) && CONST_INT_P (op2))
5853 if (XEXP (XEXP (op0, 0), 0) == op1
5854 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
5855 return op1;
5858 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
5859 (const_int N))
5860 with (vec_concat (X) (B)) if N == 1 or
5861 (vec_concat (A) (X)) if N == 2. */
5862 if (GET_CODE (op0) == VEC_DUPLICATE
5863 && GET_CODE (op1) == CONST_VECTOR
5864 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
5865 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5866 && IN_RANGE (sel, 1, 2))
5868 rtx newop0 = XEXP (op0, 0);
5869 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
5870 if (sel == 2)
5871 std::swap (newop0, newop1);
5872 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5874 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
5875 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
5876 Only applies for vectors of two elements. */
5877 if (GET_CODE (op0) == VEC_DUPLICATE
5878 && GET_CODE (op1) == VEC_CONCAT
5879 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5880 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5881 && IN_RANGE (sel, 1, 2))
5883 rtx newop0 = XEXP (op0, 0);
5884 rtx newop1 = XEXP (op1, 2 - sel);
5885 rtx otherop = XEXP (op1, sel - 1);
5886 if (sel == 2)
5887 std::swap (newop0, newop1);
5888 /* Don't want to throw away the other part of the vec_concat if
5889 it has side-effects. */
5890 if (!side_effects_p (otherop))
5891 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5894 /* Replace:
5896 (vec_merge:outer (vec_duplicate:outer x:inner)
5897 (subreg:outer y:inner 0)
5898 (const_int N))
5900 with (vec_concat:outer x:inner y:inner) if N == 1,
5901 or (vec_concat:outer y:inner x:inner) if N == 2.
5903 Implicitly, this means we have a paradoxical subreg, but such
5904 a check is cheap, so make it anyway.
5906 Only applies for vectors of two elements. */
5907 if (GET_CODE (op0) == VEC_DUPLICATE
5908 && GET_CODE (op1) == SUBREG
5909 && GET_MODE (op1) == GET_MODE (op0)
5910 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
5911 && paradoxical_subreg_p (op1)
5912 && subreg_lowpart_p (op1)
5913 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5914 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5915 && IN_RANGE (sel, 1, 2))
5917 rtx newop0 = XEXP (op0, 0);
5918 rtx newop1 = SUBREG_REG (op1);
5919 if (sel == 2)
5920 std::swap (newop0, newop1);
5921 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5924 /* Same as above but with switched operands:
5925 Replace (vec_merge:outer (subreg:outer x:inner 0)
5926 (vec_duplicate:outer y:inner)
5927 (const_int N))
5929 with (vec_concat:outer x:inner y:inner) if N == 1,
5930 or (vec_concat:outer y:inner x:inner) if N == 2. */
5931 if (GET_CODE (op1) == VEC_DUPLICATE
5932 && GET_CODE (op0) == SUBREG
5933 && GET_MODE (op0) == GET_MODE (op1)
5934 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
5935 && paradoxical_subreg_p (op0)
5936 && subreg_lowpart_p (op0)
5937 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5938 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5939 && IN_RANGE (sel, 1, 2))
5941 rtx newop0 = SUBREG_REG (op0);
5942 rtx newop1 = XEXP (op1, 0);
5943 if (sel == 2)
5944 std::swap (newop0, newop1);
5945 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5948 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
5949 (const_int n))
5950 with (vec_concat x y) or (vec_concat y x) depending on value
5951 of N. */
5952 if (GET_CODE (op0) == VEC_DUPLICATE
5953 && GET_CODE (op1) == VEC_DUPLICATE
5954 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
5955 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
5956 && IN_RANGE (sel, 1, 2))
5958 rtx newop0 = XEXP (op0, 0);
5959 rtx newop1 = XEXP (op1, 0);
5960 if (sel == 2)
5961 std::swap (newop0, newop1);
5963 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
5967 if (rtx_equal_p (op0, op1)
5968 && !side_effects_p (op2) && !side_effects_p (op1))
5969 return op0;
5971 break;
5973 default:
5974 gcc_unreachable ();
5977 return 0;
5980 /* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
5981 or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
5982 CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5984 Works by unpacking INNER_BYTES bytes of OP into a collection of 8-bit values
5985 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5986 and then repacking them again for OUTERMODE. If OP is a CONST_VECTOR,
5987 FIRST_ELEM is the number of the first element to extract, otherwise
5988 FIRST_ELEM is ignored. */
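/* For example, assuming a little-endian target: taking the QImode subreg
   at byte 0 of (const_int 0x1234) in HImode unpacks the value into the
   byte array {0x34, 0x12}, selects byte 0 and repacks it, giving
   (const_int 0x34).  */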
5990 static rtx
5991 simplify_immed_subreg (fixed_size_mode outermode, rtx op,
5992 machine_mode innermode, unsigned int byte,
5993 unsigned int first_elem, unsigned int inner_bytes)
5995 enum {
5996 value_bit = 8,
5997 value_mask = (1 << value_bit) - 1
5999 unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
6000 int value_start;
6001 int i;
6002 int elem;
6004 int num_elem;
6005 rtx * elems;
6006 int elem_bitsize;
6007 rtx result_s = NULL;
6008 rtvec result_v = NULL;
6009 enum mode_class outer_class;
6010 scalar_mode outer_submode;
6011 int max_bitsize;
6013 /* Some ports misuse CCmode. */
6014 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
6015 return op;
6017 /* We have no way to represent a complex constant at the rtl level. */
6018 if (COMPLEX_MODE_P (outermode))
6019 return NULL_RTX;
6021 /* We support any size mode. */
6022 max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
6023 inner_bytes * BITS_PER_UNIT);
6025 /* Unpack the value. */
6027 if (GET_CODE (op) == CONST_VECTOR)
6029 num_elem = CEIL (inner_bytes, GET_MODE_UNIT_SIZE (innermode));
6030 elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
6032 else
6034 num_elem = 1;
6035 elem_bitsize = max_bitsize;
6037 /* If this asserts, it is too complicated; reducing value_bit may help. */
6038 gcc_assert (BITS_PER_UNIT % value_bit == 0);
6039 /* I don't know how to handle endianness of sub-units. */
6040 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
6042 for (elem = 0; elem < num_elem; elem++)
6044 unsigned char * vp;
6045 rtx el = (GET_CODE (op) == CONST_VECTOR
6046 ? CONST_VECTOR_ELT (op, first_elem + elem)
6047 : op);
6049 /* Vectors are kept in target memory order. (This is probably
6050 a mistake.) */
6052 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6053 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6054 / BITS_PER_UNIT);
6055 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6056 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6057 unsigned bytele = (subword_byte % UNITS_PER_WORD
6058 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6059 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
6062 switch (GET_CODE (el))
6064 case CONST_INT:
6065 for (i = 0;
6066 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6067 i += value_bit)
6068 *vp++ = INTVAL (el) >> i;
6069 /* CONST_INTs are always logically sign-extended. */
6070 for (; i < elem_bitsize; i += value_bit)
6071 *vp++ = INTVAL (el) < 0 ? -1 : 0;
6072 break;
6074 case CONST_WIDE_INT:
6076 rtx_mode_t val = rtx_mode_t (el, GET_MODE_INNER (innermode));
6077 unsigned char extend = wi::sign_mask (val);
6078 int prec = wi::get_precision (val);
6080 for (i = 0; i < prec && i < elem_bitsize; i += value_bit)
6081 *vp++ = wi::extract_uhwi (val, i, value_bit);
6082 for (; i < elem_bitsize; i += value_bit)
6083 *vp++ = extend;
6085 break;
6087 case CONST_DOUBLE:
6088 if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
6090 unsigned char extend = 0;
6091 /* If this triggers, someone should have generated a
6092 CONST_INT instead. */
6093 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
6095 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6096 *vp++ = CONST_DOUBLE_LOW (el) >> i;
6097 while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
6099 *vp++
6100 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
6101 i += value_bit;
6104 if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
6105 extend = -1;
6106 for (; i < elem_bitsize; i += value_bit)
6107 *vp++ = extend;
6109 else
6111 /* This is big enough for anything on the platform. */
6112 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
6113 scalar_float_mode el_mode;
6115 el_mode = as_a <scalar_float_mode> (GET_MODE (el));
6116 int bitsize = GET_MODE_BITSIZE (el_mode);
6118 gcc_assert (bitsize <= elem_bitsize);
6119 gcc_assert (bitsize % value_bit == 0);
6121 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
6122 GET_MODE (el));
6124 /* real_to_target produces its result in words affected by
6125 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6126 and use WORDS_BIG_ENDIAN instead; see the documentation
6127 of SUBREG in rtl.texi. */
6128 for (i = 0; i < bitsize; i += value_bit)
6130 int ibase;
6131 if (WORDS_BIG_ENDIAN)
6132 ibase = bitsize - 1 - i;
6133 else
6134 ibase = i;
6135 *vp++ = tmp[ibase / 32] >> i % 32;
6138 /* It shouldn't matter what's done here, so fill it with
6139 zero. */
6140 for (; i < elem_bitsize; i += value_bit)
6141 *vp++ = 0;
6143 break;
6145 case CONST_FIXED:
6146 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
6148 for (i = 0; i < elem_bitsize; i += value_bit)
6149 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6151 else
6153 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
6154 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
6155 for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
6156 i += value_bit)
6157 *vp++ = CONST_FIXED_VALUE_HIGH (el)
6158 >> (i - HOST_BITS_PER_WIDE_INT);
6159 for (; i < elem_bitsize; i += value_bit)
6160 *vp++ = 0;
6162 break;
6164 default:
6165 gcc_unreachable ();
6169 /* Now, pick the right byte to start with. */
6170 /* Renumber BYTE so that the least-significant byte is byte 0. A special
6171 case is paradoxical SUBREGs, which shouldn't be adjusted since they
6172 will already have offset 0. */
6173 if (inner_bytes >= GET_MODE_SIZE (outermode))
6175 unsigned ibyte = inner_bytes - GET_MODE_SIZE (outermode) - byte;
6176 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6177 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6178 byte = (subword_byte % UNITS_PER_WORD
6179 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6182 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
6183 so if it's become negative it will instead be very large.) */
6184 gcc_assert (byte < inner_bytes);
6186 /* Convert from bytes to chunks of size value_bit. */
6187 value_start = byte * (BITS_PER_UNIT / value_bit);
6189 /* Re-pack the value. */
6190 num_elem = GET_MODE_NUNITS (outermode);
6192 if (VECTOR_MODE_P (outermode))
6194 result_v = rtvec_alloc (num_elem);
6195 elems = &RTVEC_ELT (result_v, 0);
6197 else
6198 elems = &result_s;
6200 outer_submode = GET_MODE_INNER (outermode);
6201 outer_class = GET_MODE_CLASS (outer_submode);
6202 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
6204 gcc_assert (elem_bitsize % value_bit == 0);
6205 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
6207 for (elem = 0; elem < num_elem; elem++)
6209 unsigned char *vp;
6211 /* Vectors are stored in target memory order. (This is probably
6212 a mistake.) */
6214 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
6215 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
6216 / BITS_PER_UNIT);
6217 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
6218 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
6219 unsigned bytele = (subword_byte % UNITS_PER_WORD
6220 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
6221 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
6224 switch (outer_class)
6226 case MODE_INT:
6227 case MODE_PARTIAL_INT:
6229 int u;
6230 int base = 0;
6231 int units
6232 = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
6233 / HOST_BITS_PER_WIDE_INT;
6234 HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
6235 wide_int r;
6237 if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
6238 return NULL_RTX;
6239 for (u = 0; u < units; u++)
6241 unsigned HOST_WIDE_INT buf = 0;
6242 for (i = 0;
6243 i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
6244 i += value_bit)
6245 buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6247 tmp[u] = buf;
6248 base += HOST_BITS_PER_WIDE_INT;
6250 r = wide_int::from_array (tmp, units,
6251 GET_MODE_PRECISION (outer_submode));
6252 #if TARGET_SUPPORTS_WIDE_INT == 0
6253 /* Make sure r will fit into CONST_INT or CONST_DOUBLE. */
6254 if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
6255 return NULL_RTX;
6256 #endif
6257 elems[elem] = immed_wide_int_const (r, outer_submode);
6259 break;
6261 case MODE_FLOAT:
6262 case MODE_DECIMAL_FLOAT:
6264 REAL_VALUE_TYPE r;
6265 long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32] = { 0 };
6267 /* real_from_target wants its input in words affected by
6268 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
6269 and use WORDS_BIG_ENDIAN instead; see the documentation
6270 of SUBREG in rtl.texi. */
6271 for (i = 0; i < elem_bitsize; i += value_bit)
6273 int ibase;
6274 if (WORDS_BIG_ENDIAN)
6275 ibase = elem_bitsize - 1 - i;
6276 else
6277 ibase = i;
6278 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
6281 real_from_target (&r, tmp, outer_submode);
6282 elems[elem] = const_double_from_real_value (r, outer_submode);
6284 break;
6286 case MODE_FRACT:
6287 case MODE_UFRACT:
6288 case MODE_ACCUM:
6289 case MODE_UACCUM:
6291 FIXED_VALUE_TYPE f;
6292 f.data.low = 0;
6293 f.data.high = 0;
6294 f.mode = outer_submode;
6296 for (i = 0;
6297 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
6298 i += value_bit)
6299 f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
6300 for (; i < elem_bitsize; i += value_bit)
6301 f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
6302 << (i - HOST_BITS_PER_WIDE_INT));
6304 elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
6306 break;
6308 default:
6309 gcc_unreachable ();
6312 if (VECTOR_MODE_P (outermode))
6313 return gen_rtx_CONST_VECTOR (outermode, result_v);
6314 else
6315 return result_s;
6318 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
6319 Return 0 if no simplifications are possible. */
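/* For example (an illustrative call, not from this file): with X an SImode
   pseudo, simplify_subreg (SImode, (vec_duplicate:V4SI X), V4SImode, 0)
   returns X, since every element of the duplicate is X; a lowpart subreg
   of a constant is instead folded through simplify_immed_subreg above.  */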
6321 simplify_subreg (machine_mode outermode, rtx op,
6322 machine_mode innermode, poly_uint64 byte)
6324 /* Little bit of sanity checking. */
6325 gcc_assert (innermode != VOIDmode);
6326 gcc_assert (outermode != VOIDmode);
6327 gcc_assert (innermode != BLKmode);
6328 gcc_assert (outermode != BLKmode);
6330 gcc_assert (GET_MODE (op) == innermode
6331 || GET_MODE (op) == VOIDmode);
6333 poly_uint64 outersize = GET_MODE_SIZE (outermode);
6334 if (!multiple_p (byte, outersize))
6335 return NULL_RTX;
6337 poly_uint64 innersize = GET_MODE_SIZE (innermode);
6338 if (maybe_ge (byte, innersize))
6339 return NULL_RTX;
6341 if (outermode == innermode && known_eq (byte, 0U))
6342 return op;
6344 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
6346 rtx elt;
6348 if (VECTOR_MODE_P (outermode)
6349 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
6350 && vec_duplicate_p (op, &elt))
6351 return gen_vec_duplicate (outermode, elt);
6353 if (outermode == GET_MODE_INNER (innermode)
6354 && vec_duplicate_p (op, &elt))
6355 return elt;
6358 if (CONST_SCALAR_INT_P (op)
6359 || CONST_DOUBLE_AS_FLOAT_P (op)
6360 || CONST_FIXED_P (op)
6361 || GET_CODE (op) == CONST_VECTOR)
6363 /* simplify_immed_subreg deconstructs OP into bytes and constructs
6364 the result from bytes, so it only works if the sizes of the modes
6365 and the value of the offset are known at compile time. Cases that
6366 apply to general modes and offsets should be handled here
6367 before calling simplify_immed_subreg. */
6368 fixed_size_mode fs_outermode, fs_innermode;
6369 unsigned HOST_WIDE_INT cbyte;
6370 if (is_a <fixed_size_mode> (outermode, &fs_outermode)
6371 && is_a <fixed_size_mode> (innermode, &fs_innermode)
6372 && byte.is_constant (&cbyte))
6373 return simplify_immed_subreg (fs_outermode, op, fs_innermode, cbyte,
6374 0, GET_MODE_SIZE (fs_innermode));
6376 /* Handle constant-sized outer modes and variable-sized inner modes. */
6377 unsigned HOST_WIDE_INT first_elem;
6378 if (GET_CODE (op) == CONST_VECTOR
6379 && is_a <fixed_size_mode> (outermode, &fs_outermode)
6380 && constant_multiple_p (byte, GET_MODE_UNIT_SIZE (innermode),
6381 &first_elem))
6382 return simplify_immed_subreg (fs_outermode, op, innermode, 0,
6383 first_elem,
6384 GET_MODE_SIZE (fs_outermode));
6386 return NULL_RTX;
6389 /* Changing mode twice with SUBREG => just change it once,
6390 or not at all if changing back to op's starting mode. */
6391 if (GET_CODE (op) == SUBREG)
6393 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
6394 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
6395 rtx newx;
6397 if (outermode == innermostmode
6398 && known_eq (byte, 0U)
6399 && known_eq (SUBREG_BYTE (op), 0))
6400 return SUBREG_REG (op);
6402 /* Work out the memory offset of the final OUTERMODE value relative
6403 to the inner value of OP. */
6404 poly_int64 mem_offset = subreg_memory_offset (outermode,
6405 innermode, byte);
6406 poly_int64 op_mem_offset = subreg_memory_offset (op);
6407 poly_int64 final_offset = mem_offset + op_mem_offset;
6409 /* See whether resulting subreg will be paradoxical. */
6410 if (!paradoxical_subreg_p (outermode, innermostmode))
6412 /* Bail out in case resulting subreg would be incorrect. */
6413 if (maybe_lt (final_offset, 0)
6414 || maybe_ge (poly_uint64 (final_offset), innermostsize)
6415 || !multiple_p (final_offset, outersize))
6416 return NULL_RTX;
6418 else
6420 poly_int64 required_offset = subreg_memory_offset (outermode,
6421 innermostmode, 0);
6422 if (maybe_ne (final_offset, required_offset))
6423 return NULL_RTX;
6424 /* Paradoxical subregs always have byte offset 0. */
6425 final_offset = 0;
6428 /* Recurse for further possible simplifications. */
6429 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
6430 final_offset);
6431 if (newx)
6432 return newx;
6433 if (validate_subreg (outermode, innermostmode,
6434 SUBREG_REG (op), final_offset))
6436 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
6437 if (SUBREG_PROMOTED_VAR_P (op)
6438 && SUBREG_PROMOTED_SIGN (op) >= 0
6439 && GET_MODE_CLASS (outermode) == MODE_INT
6440 && known_ge (outersize, innersize)
6441 && known_le (outersize, innermostsize)
6442 && subreg_lowpart_p (newx))
6444 SUBREG_PROMOTED_VAR_P (newx) = 1;
6445 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
6447 return newx;
6449 return NULL_RTX;
6452 /* SUBREG of a hard register => just change the register number
6453 and/or mode. If the hard register is not valid in that mode,
6454 suppress this simplification. If the hard register is the stack,
6455 frame, or argument pointer, leave this as a SUBREG. */
6457 if (REG_P (op) && HARD_REGISTER_P (op))
6459 unsigned int regno, final_regno;
6461 regno = REGNO (op);
6462 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
6463 if (HARD_REGISTER_NUM_P (final_regno))
6465 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
6466 subreg_memory_offset (outermode,
6467 innermode, byte));
6469 /* Propagate original regno. We don't have any way to specify
6470 the offset inside original regno, so do so only for lowpart.
6471 The information is used only by alias analysis, which cannot
6472 grok partial registers anyway. */
6474 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
6475 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
6476 return x;
6480 /* If we have a SUBREG of a register that we are replacing and we are
6481 replacing it with a MEM, make a new MEM and try replacing the
6482 SUBREG with it. Don't do this if the MEM has a mode-dependent address
6483 or if we would be widening it. */
6485 if (MEM_P (op)
6486 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
6487 /* Allow splitting of volatile memory references in case we don't
6488 have instruction to move the whole thing. */
6489 && (! MEM_VOLATILE_P (op)
6490 || ! have_insn_for (SET, innermode))
6491 && known_le (outersize, innersize))
6492 return adjust_address_nv (op, outermode, byte);
6494 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
6495 of two parts. */
6496 if (GET_CODE (op) == CONCAT
6497 || GET_CODE (op) == VEC_CONCAT)
6499 poly_uint64 final_offset;
6500 rtx part, res;
6502 machine_mode part_mode = GET_MODE (XEXP (op, 0));
6503 if (part_mode == VOIDmode)
6504 part_mode = GET_MODE_INNER (GET_MODE (op));
6505 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
6506 if (known_lt (byte, part_size))
6508 part = XEXP (op, 0);
6509 final_offset = byte;
6511 else if (known_ge (byte, part_size))
6513 part = XEXP (op, 1);
6514 final_offset = byte - part_size;
6516 else
6517 return NULL_RTX;
6519 if (maybe_gt (final_offset + outersize, part_size))
6520 return NULL_RTX;
6522 part_mode = GET_MODE (part);
6523 if (part_mode == VOIDmode)
6524 part_mode = GET_MODE_INNER (GET_MODE (op));
6525 res = simplify_subreg (outermode, part, part_mode, final_offset);
6526 if (res)
6527 return res;
6528 if (validate_subreg (outermode, part_mode, part, final_offset))
6529 return gen_rtx_SUBREG (outermode, part, final_offset);
6530 return NULL_RTX;
6533 /* A SUBREG resulting from a zero extension may fold to zero if
6534 it extracts higher bits than the ZERO_EXTEND's source bits. */
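/* For example, on a little-endian target
   (subreg:SI (zero_extend:DI (reg:SI X)) 4) extracts bits 32..63 of the
   zero-extended value, which are all known to be zero, so the whole
   subreg folds to (const_int 0).  */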
6535 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
6537 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
6538 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
6539 return CONST0_RTX (outermode);
6542 scalar_int_mode int_outermode, int_innermode;
6543 if (is_a <scalar_int_mode> (outermode, &int_outermode)
6544 && is_a <scalar_int_mode> (innermode, &int_innermode)
6545 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
6547 /* Handle polynomial integers. The upper bits of a paradoxical
6548 subreg are undefined, so this is safe regardless of whether
6549 we're truncating or extending. */
6550 if (CONST_POLY_INT_P (op))
6552 poly_wide_int val
6553 = poly_wide_int::from (const_poly_int_value (op),
6554 GET_MODE_PRECISION (int_outermode),
6555 SIGNED);
6556 return immed_wide_int_const (val, int_outermode);
6559 if (GET_MODE_PRECISION (int_outermode)
6560 < GET_MODE_PRECISION (int_innermode))
6562 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
6563 if (tem)
6564 return tem;
6568 return NULL_RTX;
6571 /* Make a SUBREG operation or equivalent if it folds. */
6574 simplify_gen_subreg (machine_mode outermode, rtx op,
6575 machine_mode innermode, poly_uint64 byte)
6577 rtx newx;
6579 newx = simplify_subreg (outermode, op, innermode, byte);
6580 if (newx)
6581 return newx;
6583 if (GET_CODE (op) == SUBREG
6584 || GET_CODE (op) == CONCAT
6585 || GET_MODE (op) == VOIDmode)
6586 return NULL_RTX;
6588 if (validate_subreg (outermode, innermode, op, byte))
6589 return gen_rtx_SUBREG (outermode, op, byte);
6591 return NULL_RTX;
6594 /* Generates a subreg to get the least significant part of EXPR (in mode
6595 INNER_MODE) to OUTER_MODE. */
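/* For instance (an illustrative call, not from this file):
   lowpart_subreg (QImode, X, SImode) is equivalent to
   simplify_gen_subreg (QImode, X, SImode,
   subreg_lowpart_offset (QImode, SImode)), i.e. byte 0 on little-endian
   targets and byte 3 on big-endian ones.  */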
6598 lowpart_subreg (machine_mode outer_mode, rtx expr,
6599 machine_mode inner_mode)
6601 return simplify_gen_subreg (outer_mode, expr, inner_mode,
6602 subreg_lowpart_offset (outer_mode, inner_mode));
6605 /* Simplify X, an rtx expression.
6607 Return the simplified expression or NULL if no simplifications
6608 were possible.
6610 This is the preferred entry point into the simplification routines;
6611 however, we still allow passes to call the more specific routines.
6613 Right now GCC has three (yes, three) major bodies of RTL simplification
6614 code that need to be unified.
6616 1. fold_rtx in cse.c. This code uses various CSE specific
6617 information to aid in RTL simplification.
6619 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
6620 it uses combine specific information to aid in RTL
6621 simplification.
6623 3. The routines in this file.
6626 Long term we want to have only one body of simplification code; to
6627 get to that state I recommend the following steps:
6629 1. Pore over fold_rtx & simplify_rtx and move any simplifications
6630 which do not depend on pass-specific state into these routines.
6632 2. As code is moved by #1, change fold_rtx & simplify_rtx to
6633 use this routine whenever possible.
6635 3. Allow for pass dependent state to be provided to these
6636 routines and add simplifications based on the pass dependent
6637 state. Remove code from cse.c & combine.c that becomes
6638 redundant/dead.
6640 It will take time, but ultimately the compiler will be easier to
6641 maintain and improve. It's totally silly that when we add a
6642 simplification it needs to be added to 4 places (3 for RTL
6643 simplification and 1 for tree simplification). */
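/* For example (an illustrative call, not from any pass in this file):
   simplify_rtx (gen_rtx_PLUS (SImode, x, const0_rtx)) dispatches to
   simplify_binary_operation and returns x, while an rtx that cannot be
   simplified yields NULL.  */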
6646 simplify_rtx (const_rtx x)
6648 const enum rtx_code code = GET_CODE (x);
6649 const machine_mode mode = GET_MODE (x);
6651 switch (GET_RTX_CLASS (code))
6653 case RTX_UNARY:
6654 return simplify_unary_operation (code, mode,
6655 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
6656 case RTX_COMM_ARITH:
6657 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
6658 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
6660 /* Fall through. */
6662 case RTX_BIN_ARITH:
6663 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
6665 case RTX_TERNARY:
6666 case RTX_BITFIELD_OPS:
6667 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
6668 XEXP (x, 0), XEXP (x, 1),
6669 XEXP (x, 2));
6671 case RTX_COMPARE:
6672 case RTX_COMM_COMPARE:
6673 return simplify_relational_operation (code, mode,
6674 ((GET_MODE (XEXP (x, 0))
6675 != VOIDmode)
6676 ? GET_MODE (XEXP (x, 0))
6677 : GET_MODE (XEXP (x, 1))),
6678 XEXP (x, 0),
6679 XEXP (x, 1));
6681 case RTX_EXTRA:
6682 if (code == SUBREG)
6683 return simplify_subreg (mode, SUBREG_REG (x),
6684 GET_MODE (SUBREG_REG (x)),
6685 SUBREG_BYTE (x));
6686 break;
6688 case RTX_OBJ:
6689 if (code == LO_SUM)
6691 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6692 if (GET_CODE (XEXP (x, 0)) == HIGH
6693 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6694 return XEXP (x, 1);
6696 break;
6698 default:
6699 break;
6701 return NULL;
6704 #if CHECKING_P
6706 namespace selftest {
6708 /* Make a unique pseudo REG of mode MODE for use by selftests. */
6710 static rtx
6711 make_test_reg (machine_mode mode)
6713 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
6715 return gen_rtx_REG (mode, test_reg_num++);
6718 /* Test vector simplifications involving VEC_DUPLICATE in which the
6719 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6720 register that holds one element of MODE. */
6722 static void
6723 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
6725 scalar_mode inner_mode = GET_MODE_INNER (mode);
6726 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6727 poly_uint64 nunits = GET_MODE_NUNITS (mode);
6728 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
6730 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
6731 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
6732 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
6733 ASSERT_RTX_EQ (duplicate,
6734 simplify_unary_operation (NOT, mode,
6735 duplicate_not, mode));
6737 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6738 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
6739 ASSERT_RTX_EQ (duplicate,
6740 simplify_unary_operation (NEG, mode,
6741 duplicate_neg, mode));
6743 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
6744 ASSERT_RTX_EQ (duplicate,
6745 simplify_binary_operation (PLUS, mode, duplicate,
6746 CONST0_RTX (mode)));
6748 ASSERT_RTX_EQ (duplicate,
6749 simplify_binary_operation (MINUS, mode, duplicate,
6750 CONST0_RTX (mode)));
6752 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
6753 simplify_binary_operation (MINUS, mode, duplicate,
6754 duplicate));
6757 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
6758 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
6759 ASSERT_RTX_PTR_EQ (scalar_reg,
6760 simplify_binary_operation (VEC_SELECT, inner_mode,
6761 duplicate, zero_par));
6763 /* And again with the final element. */
6764 unsigned HOST_WIDE_INT const_nunits;
6765 if (nunits.is_constant (&const_nunits))
6767 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
6768 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
6769 ASSERT_RTX_PTR_EQ (scalar_reg,
6770 simplify_binary_operation (VEC_SELECT, inner_mode,
6771 duplicate, last_par));
6774 /* Test a scalar subreg of a VEC_DUPLICATE. */
6775 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
6776 ASSERT_RTX_EQ (scalar_reg,
6777 simplify_gen_subreg (inner_mode, duplicate,
6778 mode, offset));
6780 machine_mode narrower_mode;
6781 if (maybe_ne (nunits, 2U)
6782 && multiple_p (nunits, 2)
6783 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
6784 && VECTOR_MODE_P (narrower_mode))
6786 /* Test VEC_SELECT of a vector. */
6787 rtx vec_par
6788 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
6789 rtx narrower_duplicate
6790 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
6791 ASSERT_RTX_EQ (narrower_duplicate,
6792 simplify_binary_operation (VEC_SELECT, narrower_mode,
6793 duplicate, vec_par));
6795 /* Test a vector subreg of a VEC_DUPLICATE. */
6796 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
6797 ASSERT_RTX_EQ (narrower_duplicate,
6798 simplify_gen_subreg (narrower_mode, duplicate,
6799 mode, offset));
6803 /* Test vector simplifications involving VEC_SERIES in which the
6804 operands and result have vector mode MODE. SCALAR_REG is a pseudo
6805 register that holds one element of MODE. */
6807 static void
6808 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
6810 /* Test unary cases with VEC_SERIES arguments. */
6811 scalar_mode inner_mode = GET_MODE_INNER (mode);
6812 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
6813 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
6814 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
6815 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
6816 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
6817 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
6818 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
6819 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
6820 neg_scalar_reg);
6821 ASSERT_RTX_EQ (series_0_r,
6822 simplify_unary_operation (NEG, mode, series_0_nr, mode));
6823 ASSERT_RTX_EQ (series_r_m1,
6824 simplify_unary_operation (NEG, mode, series_nr_1, mode));
6825 ASSERT_RTX_EQ (series_r_r,
6826 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
6828 /* Test that a VEC_SERIES with a zero step is simplified away. */
6829 ASSERT_RTX_EQ (duplicate,
6830 simplify_binary_operation (VEC_SERIES, mode,
6831 scalar_reg, const0_rtx));
6833 /* Test PLUS and MINUS with VEC_SERIES. */
6834 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
6835 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
6836 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
6837 ASSERT_RTX_EQ (series_r_r,
6838 simplify_binary_operation (PLUS, mode, series_0_r,
6839 duplicate));
6840 ASSERT_RTX_EQ (series_r_1,
6841 simplify_binary_operation (PLUS, mode, duplicate,
6842 series_0_1));
6843 ASSERT_RTX_EQ (series_r_m1,
6844 simplify_binary_operation (PLUS, mode, duplicate,
6845 series_0_m1));
6846 ASSERT_RTX_EQ (series_0_r,
6847 simplify_binary_operation (MINUS, mode, series_r_r,
6848 duplicate));
6849 ASSERT_RTX_EQ (series_r_m1,
6850 simplify_binary_operation (MINUS, mode, duplicate,
6851 series_0_1));
6852 ASSERT_RTX_EQ (series_r_1,
6853 simplify_binary_operation (MINUS, mode, duplicate,
6854 series_0_m1));
6855 ASSERT_RTX_EQ (series_0_m1,
6856 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
6857 constm1_rtx));
6860 /* Verify some simplifications involving vectors. */
6862 static void
6863 test_vector_ops ()
6865 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
6867 machine_mode mode = (machine_mode) i;
6868 if (VECTOR_MODE_P (mode))
6870 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
6871 test_vector_ops_duplicate (mode, scalar_reg);
6872 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6873 && maybe_gt (GET_MODE_NUNITS (mode), 2))
6874 test_vector_ops_series (mode, scalar_reg);
6879 template<unsigned int N>
6880 struct simplify_const_poly_int_tests
6882 static void run ();
6885 template<>
6886 struct simplify_const_poly_int_tests<1>
6888 static void run () {}
6891 /* Test various CONST_POLY_INT properties. */
6893 template<unsigned int N>
6894 void
6895 simplify_const_poly_int_tests<N>::run ()
6897 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
6898 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
6899 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
6900 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
6901 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
6902 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
6903 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
6904 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
6905 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
6906 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
6907 rtx two = GEN_INT (2);
6908 rtx six = GEN_INT (6);
6909 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
6911 /* These tests only try limited operation combinations. Fuller arithmetic
6912 testing is done directly on poly_ints. */
6913 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
6914 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
6915 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
6916 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
6917 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
6918 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
6919 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
6920 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
6921 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
6922 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
6923 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
6926 /* Run all of the selftests within this file. */
6928 void
6929 simplify_rtx_c_tests ()
6931 test_vector_ops ();
6932 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
6935 } // namespace selftest
6937 #endif /* CHECKING_P */