gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "optabs.h"
31 #include "emit-rtl.h"
32 #include "recog.h"
33 #include "diagnostic-core.h"
34 #include "varasm.h"
35 #include "flags.h"
36 #include "selftest.h"
37 #include "selftest-rtl.h"
38 #include "rtx-vector-builder.h"
39 #include "rtlanal.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
46 signed wide int. */
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
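/* Illustrative example (added for clarity, not in the original source):
   with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (0x8000000000000000)
   yields HOST_WIDE_INT_M1 (all bits set) because the argument is negative
   when viewed as a signed HOST_WIDE_INT, while HWI_SIGN_EXTEND (5)
   yields HOST_WIDE_INT_0.  The result supplies the high word of the
   (low, high) pair described above.  */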
50 static bool plus_minus_operand_p (const_rtx);
52 /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
54 static rtx
55 neg_poly_int_rtx (machine_mode mode, const_rtx i)
57 return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
60 /* Test whether expression, X, is an immediate constant that represents
61 the most significant bit of machine mode MODE. */
63 bool
64 mode_signbit_p (machine_mode mode, const_rtx x)
66 unsigned HOST_WIDE_INT val;
67 unsigned int width;
68 scalar_int_mode int_mode;
70 if (!is_int_mode (mode, &int_mode))
71 return false;
73 width = GET_MODE_PRECISION (int_mode);
74 if (width == 0)
75 return false;
77 if (width <= HOST_BITS_PER_WIDE_INT
78 && CONST_INT_P (x))
79 val = INTVAL (x);
80 #if TARGET_SUPPORTS_WIDE_INT
81 else if (CONST_WIDE_INT_P (x))
83 unsigned int i;
84 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
85 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
86 return false;
87 for (i = 0; i < elts - 1; i++)
88 if (CONST_WIDE_INT_ELT (x, i) != 0)
89 return false;
90 val = CONST_WIDE_INT_ELT (x, elts - 1);
91 width %= HOST_BITS_PER_WIDE_INT;
92 if (width == 0)
93 width = HOST_BITS_PER_WIDE_INT;
95 #else
96 else if (width <= HOST_BITS_PER_DOUBLE_INT
97 && CONST_DOUBLE_AS_INT_P (x)
98 && CONST_DOUBLE_LOW (x) == 0)
100 val = CONST_DOUBLE_HIGH (x);
101 width -= HOST_BITS_PER_WIDE_INT;
103 #endif
104 else
105 /* X is not an integer constant. */
106 return false;
108 if (width < HOST_BITS_PER_WIDE_INT)
109 val &= (HOST_WIDE_INT_1U << width) - 1;
110 return val == (HOST_WIDE_INT_1U << (width - 1));
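/* Worked example (illustrative only): for SImode, whose precision is 32,
   the masked value must equal 1 << 31, so (const_int -2147483648)
   satisfies mode_signbit_p (SImode, x) while (const_int 0x40000000)
   does not.  */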
113 /* Test whether VAL is equal to the most significant bit of mode MODE
114 (after masking with the mode mask of MODE). Returns false if the
115 precision of MODE is too large to handle. */
117 bool
118 val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
120 unsigned int width;
121 scalar_int_mode int_mode;
123 if (!is_int_mode (mode, &int_mode))
124 return false;
126 width = GET_MODE_PRECISION (int_mode);
127 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
128 return false;
130 val &= GET_MODE_MASK (int_mode);
131 return val == (HOST_WIDE_INT_1U << (width - 1));
134 /* Test whether the most significant bit of mode MODE is set in VAL.
135 Returns false if the precision of MODE is too large to handle. */
136 bool
137 val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
139 unsigned int width;
141 scalar_int_mode int_mode;
142 if (!is_int_mode (mode, &int_mode))
143 return false;
145 width = GET_MODE_PRECISION (int_mode);
146 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
147 return false;
149 val &= HOST_WIDE_INT_1U << (width - 1);
150 return val != 0;
153 /* Test whether the most significant bit of mode MODE is clear in VAL.
154 Returns false if the precision of MODE is too large to handle. */
155 bool
156 val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
158 unsigned int width;
160 scalar_int_mode int_mode;
161 if (!is_int_mode (mode, &int_mode))
162 return false;
164 width = GET_MODE_PRECISION (int_mode);
165 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
166 return false;
168 val &= HOST_WIDE_INT_1U << (width - 1);
169 return val == 0;
172 /* Make a binary operation by properly ordering the operands and
173 seeing if the expression folds. */
175 rtx
176 simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
177 rtx op0, rtx op1)
179 rtx tem;
181 /* If this simplifies, do it. */
182 tem = simplify_binary_operation (code, mode, op0, op1);
183 if (tem)
184 return tem;
186 /* Put complex operands first and constants second if commutative. */
187 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
188 && swap_commutative_operands_p (op0, op1))
189 std::swap (op0, op1);
191 return gen_rtx_fmt_ee (code, mode, op0, op1);
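/* Usage sketch (illustrative; the operands shown are hypothetical):
   simplify_gen_binary (PLUS, SImode, const1_rtx, GEN_INT (2)) folds
   immediately to (const_int 3), whereas
   simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg) returns the
   canonicalized (plus:SI reg (const_int 2)), with the constant second
   because PLUS is commutative.  */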
194 /* If X is a MEM referencing the constant pool, return the real value.
195 Otherwise return X. */
196 rtx
197 avoid_constant_pool_reference (rtx x)
199 rtx c, tmp, addr;
200 machine_mode cmode;
201 poly_int64 offset = 0;
203 switch (GET_CODE (x))
205 case MEM:
206 break;
208 case FLOAT_EXTEND:
209 /* Handle float extensions of constant pool references. */
210 tmp = XEXP (x, 0);
211 c = avoid_constant_pool_reference (tmp);
212 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
213 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
214 GET_MODE (x));
215 return x;
217 default:
218 return x;
221 if (GET_MODE (x) == BLKmode)
222 return x;
224 addr = XEXP (x, 0);
226 /* Call target hook to avoid the effects of -fpic etc.... */
227 addr = targetm.delegitimize_address (addr);
229 /* Split the address into a base and integer offset. */
230 addr = strip_offset (addr, &offset);
232 if (GET_CODE (addr) == LO_SUM)
233 addr = XEXP (addr, 1);
235 /* If this is a constant pool reference, we can turn it into its
236 constant and hope that simplifications happen. */
237 if (GET_CODE (addr) == SYMBOL_REF
238 && CONSTANT_POOL_ADDRESS_P (addr))
240 c = get_pool_constant (addr);
241 cmode = get_pool_mode (addr);
243 /* If we're accessing the constant in a different mode than it was
244 originally stored, attempt to fix that up via subreg simplifications.
245 If that fails we have no choice but to return the original memory. */
246 if (known_eq (offset, 0) && cmode == GET_MODE (x))
247 return c;
248 else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
250 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
251 if (tem && CONSTANT_P (tem))
252 return tem;
256 return x;
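/* Example (illustrative only): if X is (mem:SI (symbol_ref C)) where C
   is a constant-pool entry holding (const_int 42) in SImode, the
   function returns (const_int 42).  With a nonzero offset or a
   different access mode it instead tries simplify_subreg on the pool
   constant and falls back to returning the MEM unchanged.  */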
259 /* Simplify a MEM based on its attributes. This is the default
260 delegitimize_address target hook, and it's recommended that every
261 overrider call it. */
263 rtx
264 delegitimize_mem_from_attrs (rtx x)
266 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 use their base addresses as equivalent. */
268 if (MEM_P (x)
269 && MEM_EXPR (x)
270 && MEM_OFFSET_KNOWN_P (x))
272 tree decl = MEM_EXPR (x);
273 machine_mode mode = GET_MODE (x);
274 poly_int64 offset = 0;
276 switch (TREE_CODE (decl))
278 default:
279 decl = NULL;
280 break;
282 case VAR_DECL:
283 break;
285 case ARRAY_REF:
286 case ARRAY_RANGE_REF:
287 case COMPONENT_REF:
288 case BIT_FIELD_REF:
289 case REALPART_EXPR:
290 case IMAGPART_EXPR:
291 case VIEW_CONVERT_EXPR:
293 poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
294 tree toffset;
295 int unsignedp, reversep, volatilep = 0;
297 decl
298 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
299 &unsignedp, &reversep, &volatilep);
300 if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
301 || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
302 || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
303 decl = NULL;
304 else
305 offset += bytepos + toffset_val;
306 break;
310 if (decl
311 && mode == GET_MODE (x)
312 && VAR_P (decl)
313 && (TREE_STATIC (decl)
314 || DECL_THREAD_LOCAL_P (decl))
315 && DECL_RTL_SET_P (decl)
316 && MEM_P (DECL_RTL (decl)))
318 rtx newx;
320 offset += MEM_OFFSET (x);
322 newx = DECL_RTL (decl);
324 if (MEM_P (newx))
326 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
327 poly_int64 n_offset, o_offset;
329 /* Avoid creating a new MEM needlessly if we already had
330 the same address. We do if there's no OFFSET and the
331 old address X is identical to NEWX, or if X is of the
332 form (plus NEWX OFFSET), or the NEWX is of the form
333 (plus Y (const_int Z)) and X is that with the offset
334 added: (plus Y (const_int Z+OFFSET)). */
335 n = strip_offset (n, &n_offset);
336 o = strip_offset (o, &o_offset);
337 if (!(known_eq (o_offset, n_offset + offset)
338 && rtx_equal_p (o, n)))
339 x = adjust_address_nv (newx, mode, offset);
341 else if (GET_MODE (x) == GET_MODE (newx)
342 && known_eq (offset, 0))
343 x = newx;
347 return x;
350 /* Make a unary operation by first seeing if it folds and otherwise making
351 the specified operation. */
353 rtx
354 simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
355 machine_mode op_mode)
357 rtx tem;
359 /* If this simplifies, use it. */
360 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
361 return tem;
363 return gen_rtx_fmt_e (code, mode, op);
366 /* Likewise for ternary operations. */
368 rtx
369 simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
370 machine_mode op0_mode,
371 rtx op0, rtx op1, rtx op2)
373 rtx tem;
375 /* If this simplifies, use it. */
376 if ((tem = simplify_ternary_operation (code, mode, op0_mode,
377 op0, op1, op2)) != 0)
378 return tem;
380 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
383 /* Likewise, for relational operations.
384 CMP_MODE specifies mode comparison is done in. */
386 rtx
387 simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
388 machine_mode cmp_mode,
389 rtx op0, rtx op1)
391 rtx tem;
393 if ((tem = simplify_relational_operation (code, mode, cmp_mode,
394 op0, op1)) != 0)
395 return tem;
397 return gen_rtx_fmt_ee (code, mode, op0, op1);
400 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
401 and simplify the result. If FN is non-NULL, call this callback on each
402 X, if it returns non-NULL, replace X with its return value and simplify the
403 result. */
405 rtx
406 simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
407 rtx (*fn) (rtx, const_rtx, void *), void *data)
409 enum rtx_code code = GET_CODE (x);
410 machine_mode mode = GET_MODE (x);
411 machine_mode op_mode;
412 const char *fmt;
413 rtx op0, op1, op2, newx, op;
414 rtvec vec, newvec;
415 int i, j;
417 if (__builtin_expect (fn != NULL, 0))
419 newx = fn (x, old_rtx, data);
420 if (newx)
421 return newx;
423 else if (rtx_equal_p (x, old_rtx))
424 return copy_rtx ((rtx) data);
426 switch (GET_RTX_CLASS (code))
428 case RTX_UNARY:
429 op0 = XEXP (x, 0);
430 op_mode = GET_MODE (op0);
431 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
432 if (op0 == XEXP (x, 0))
433 return x;
434 return simplify_gen_unary (code, mode, op0, op_mode);
436 case RTX_BIN_ARITH:
437 case RTX_COMM_ARITH:
438 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
439 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
440 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
441 return x;
442 return simplify_gen_binary (code, mode, op0, op1);
444 case RTX_COMPARE:
445 case RTX_COMM_COMPARE:
446 op0 = XEXP (x, 0);
447 op1 = XEXP (x, 1);
448 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
449 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
450 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
451 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 return x;
453 return simplify_gen_relational (code, mode, op_mode, op0, op1);
455 case RTX_TERNARY:
456 case RTX_BITFIELD_OPS:
457 op0 = XEXP (x, 0);
458 op_mode = GET_MODE (op0);
459 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
461 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
462 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
463 return x;
464 if (op_mode == VOIDmode)
465 op_mode = GET_MODE (op0);
466 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
468 case RTX_EXTRA:
469 if (code == SUBREG)
471 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
472 if (op0 == SUBREG_REG (x))
473 return x;
474 op0 = simplify_gen_subreg (GET_MODE (x), op0,
475 GET_MODE (SUBREG_REG (x)),
476 SUBREG_BYTE (x));
477 return op0 ? op0 : x;
479 break;
481 case RTX_OBJ:
482 if (code == MEM)
484 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
485 if (op0 == XEXP (x, 0))
486 return x;
487 return replace_equiv_address_nv (x, op0);
489 else if (code == LO_SUM)
491 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
494 /* (lo_sum (high x) y) -> y where x and y have the same base. */
495 if (GET_CODE (op0) == HIGH)
497 rtx base0, base1, offset0, offset1;
498 split_const (XEXP (op0, 0), &base0, &offset0);
499 split_const (op1, &base1, &offset1);
500 if (rtx_equal_p (base0, base1))
501 return op1;
504 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
505 return x;
506 return gen_rtx_LO_SUM (mode, op0, op1);
508 break;
510 default:
511 break;
514 newx = x;
515 fmt = GET_RTX_FORMAT (code);
516 for (i = 0; fmt[i]; i++)
517 switch (fmt[i])
519 case 'E':
520 vec = XVEC (x, i);
521 newvec = XVEC (newx, i);
522 for (j = 0; j < GET_NUM_ELEM (vec); j++)
524 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
525 old_rtx, fn, data);
526 if (op != RTVEC_ELT (vec, j))
528 if (newvec == vec)
530 newvec = shallow_copy_rtvec (vec);
531 if (x == newx)
532 newx = shallow_copy_rtx (x);
533 XVEC (newx, i) = newvec;
535 RTVEC_ELT (newvec, j) = op;
538 break;
540 case 'e':
541 if (XEXP (x, i))
543 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
544 if (op != XEXP (x, i))
546 if (x == newx)
547 newx = shallow_copy_rtx (x);
548 XEXP (newx, i) = op;
551 break;
553 return newx;
556 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
557 resulting RTX. Return a new RTX which is as simplified as possible. */
559 rtx
560 simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
562 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
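/* Minimal usage sketch of the wrapper above (illustrative only, not part
   of the original source; the helper name and the particular constant
   are hypothetical).  It substitutes a constant for a register and lets
   the simplifier fold the result, e.g.
   (plus:SI (reg:SI 100) (const_int 1)) becomes (const_int 5).  */
#if 0
static rtx
example_substitute_and_fold (rtx pattern, rtx pseudo)
{
  /* Replace every occurrence of PSEUDO in PATTERN with (const_int 4)
     and simplify the result.  */
  return simplify_replace_rtx (pattern, pseudo, GEN_INT (4));
}
#endif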
565 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
566 Only handle cases where the truncated value is inherently an rvalue.
568 RTL provides two ways of truncating a value:
570 1. a lowpart subreg. This form is only a truncation when both
571 the outer and inner modes (here MODE and OP_MODE respectively)
572 are scalar integers, and only then when the subreg is used as
573 an rvalue.
575 It is only valid to form such truncating subregs if the
576 truncation requires no action by the target. The onus for
577 proving this is on the creator of the subreg -- e.g. the
578 caller to simplify_subreg or simplify_gen_subreg -- and typically
579 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
581 2. a TRUNCATE. This form handles both scalar and compound integers.
583 The first form is preferred where valid. However, the TRUNCATE
584 handling in simplify_unary_operation turns the second form into the
585 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
586 so it is generally safe to form rvalue truncations using:
588 simplify_gen_unary (TRUNCATE, ...)
590 and leave simplify_unary_operation to work out which representation
591 should be used.
593 Because of the proof requirements on (1), simplify_truncation must
594 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
595 regardless of whether the outer truncation came from a SUBREG or a
596 TRUNCATE. For example, if the caller has proven that an SImode
597 truncation of:
599 (and:DI X Y)
601 is a no-op and can be represented as a subreg, it does not follow
602 that SImode truncations of X and Y are also no-ops. On a target
603 like 64-bit MIPS that requires SImode values to be stored in
604 sign-extended form, an SImode truncation of:
606 (and:DI (reg:DI X) (const_int 63))
608 is trivially a no-op because only the lower 6 bits can be set.
609 However, X is still an arbitrary 64-bit number and so we cannot
610 assume that truncating it too is a no-op. */
612 rtx
613 simplify_context::simplify_truncation (machine_mode mode, rtx op,
614 machine_mode op_mode)
616 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
617 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
618 scalar_int_mode int_mode, int_op_mode, subreg_mode;
620 gcc_assert (precision <= op_precision);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op) == ZERO_EXTEND
624 || GET_CODE (op) == SIGN_EXTEND)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
631 mode. */
632 machine_mode origmode = GET_MODE (XEXP (op, 0));
633 if (mode == origmode)
634 return XEXP (op, 0);
635 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 return simplify_gen_unary (TRUNCATE, mode,
637 XEXP (op, 0), origmode);
638 else
639 return simplify_gen_unary (GET_CODE (op), mode,
640 XEXP (op, 0), origmode);
643 /* If the machine can perform operations in the truncated mode, distribute
644 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
645 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
646 if (1
647 && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
648 && (GET_CODE (op) == PLUS
649 || GET_CODE (op) == MINUS
650 || GET_CODE (op) == MULT))
652 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
653 if (op0)
655 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
656 if (op1)
657 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
661 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
662 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
663 the outer subreg is effectively a truncation to the original mode. */
664 if ((GET_CODE (op) == LSHIFTRT
665 || GET_CODE (op) == ASHIFTRT)
666 /* Ensure that OP_MODE is at least twice as wide as MODE
667 to avoid the possibility that an outer LSHIFTRT shifts by more
668 than the sign extension's sign_bit_copies and introduces zeros
669 into the high bits of the result. */
670 && 2 * precision <= op_precision
671 && CONST_INT_P (XEXP (op, 1))
672 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
673 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
674 && UINTVAL (XEXP (op, 1)) < precision)
675 return simplify_gen_binary (ASHIFTRT, mode,
676 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
678 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
679 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
680 the outer subreg is effectively a truncation to the original mode. */
681 if ((GET_CODE (op) == LSHIFTRT
682 || GET_CODE (op) == ASHIFTRT)
683 && CONST_INT_P (XEXP (op, 1))
684 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
685 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
686 && UINTVAL (XEXP (op, 1)) < precision)
687 return simplify_gen_binary (LSHIFTRT, mode,
688 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
690 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
691 (ashift:QI (x:QI) C), where C is a suitable small constant and
692 the outer subreg is effectively a truncation to the original mode. */
693 if (GET_CODE (op) == ASHIFT
694 && CONST_INT_P (XEXP (op, 1))
695 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
696 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
697 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
698 && UINTVAL (XEXP (op, 1)) < precision)
699 return simplify_gen_binary (ASHIFT, mode,
700 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
702 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
703 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
704 and C2. */
705 if (GET_CODE (op) == AND
706 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
707 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
708 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
709 && CONST_INT_P (XEXP (op, 1)))
711 rtx op0 = (XEXP (XEXP (op, 0), 0));
712 rtx shift_op = XEXP (XEXP (op, 0), 1);
713 rtx mask_op = XEXP (op, 1);
714 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
715 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
717 if (shift < precision
718 /* If doing this transform works for an X with all bits set,
719 it works for any X. */
720 && ((GET_MODE_MASK (mode) >> shift) & mask)
721 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
722 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
723 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
725 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
726 return simplify_gen_binary (AND, mode, op0, mask_op);
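/* Worked example of the condition above (illustrative only): truncating
   (and:SI (lshiftrt:SI X (const_int 2)) (const_int 0x3f)) to QImode has
   shift == 2 and mask == 0x3f; both (0xff >> 2) & 0x3f and
   (0xffffffff >> 2) & 0x3f are 0x3f, so the transform applies and gives
   (and:QI (lshiftrt:QI (truncate:QI X) (const_int 2)) (const_int 0x3f)).  */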
730 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
731 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
732 changing len. */
733 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
734 && REG_P (XEXP (op, 0))
735 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
736 && CONST_INT_P (XEXP (op, 1))
737 && CONST_INT_P (XEXP (op, 2)))
739 rtx op0 = XEXP (op, 0);
740 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
741 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
742 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
744 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
745 if (op0)
747 pos -= op_precision - precision;
748 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
749 XEXP (op, 1), GEN_INT (pos));
752 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
754 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
755 if (op0)
756 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
757 XEXP (op, 1), XEXP (op, 2));
761 /* Recognize a word extraction from a multi-word subreg. */
762 if ((GET_CODE (op) == LSHIFTRT
763 || GET_CODE (op) == ASHIFTRT)
764 && SCALAR_INT_MODE_P (mode)
765 && SCALAR_INT_MODE_P (op_mode)
766 && precision >= BITS_PER_WORD
767 && 2 * precision <= op_precision
768 && CONST_INT_P (XEXP (op, 1))
769 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
770 && UINTVAL (XEXP (op, 1)) < op_precision)
772 poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
773 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
774 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
775 (WORDS_BIG_ENDIAN
776 ? byte - shifted_bytes
777 : byte + shifted_bytes));
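/* Example (illustrative only, assuming 32-bit words and little-endian
   word order): (truncate:SI (lshiftrt:DI (reg:DI X) (const_int 32)))
   passes the tests above and becomes (subreg:SI (reg:DI X) 4), i.e. a
   direct reference to the high word of X.  */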
780 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
781 and try replacing the TRUNCATE and shift with it. Don't do this
782 if the MEM has a mode-dependent address. */
783 if ((GET_CODE (op) == LSHIFTRT
784 || GET_CODE (op) == ASHIFTRT)
785 && is_a <scalar_int_mode> (mode, &int_mode)
786 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
787 && MEM_P (XEXP (op, 0))
788 && CONST_INT_P (XEXP (op, 1))
789 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
790 && INTVAL (XEXP (op, 1)) > 0
791 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
792 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
793 MEM_ADDR_SPACE (XEXP (op, 0)))
794 && ! MEM_VOLATILE_P (XEXP (op, 0))
795 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
796 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
798 poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return adjust_address_nv (XEXP (op, 0), int_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
806 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
807 (OP:SI foo:SI) if OP is NEG or ABS. */
808 if ((GET_CODE (op) == ABS
809 || GET_CODE (op) == NEG)
810 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
811 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
812 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
813 return simplify_gen_unary (GET_CODE (op), mode,
814 XEXP (XEXP (op, 0), 0), mode);
816 /* Simplifications of (truncate:A (subreg:B X 0)). */
817 if (GET_CODE (op) == SUBREG
818 && is_a <scalar_int_mode> (mode, &int_mode)
819 && SCALAR_INT_MODE_P (op_mode)
820 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
821 && subreg_lowpart_p (op))
823 /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X). */
824 if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
826 rtx inner = XEXP (SUBREG_REG (op), 0);
827 if (GET_MODE_PRECISION (int_mode)
828 <= GET_MODE_PRECISION (subreg_mode))
829 return simplify_gen_unary (TRUNCATE, int_mode, inner,
830 GET_MODE (inner));
831 else
832 /* If subreg above is paradoxical and C is narrower
833 than A, return (subreg:A (truncate:C X) 0). */
834 return simplify_gen_subreg (int_mode, SUBREG_REG (op),
835 subreg_mode, 0);
838 /* Simplifications of (truncate:A (subreg:B X:C 0)) with
839 paradoxical subregs (B is wider than C). */
840 if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
842 unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
843 unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
844 if (int_op_prec > subreg_prec)
846 if (int_mode == subreg_mode)
847 return SUBREG_REG (op);
848 if (GET_MODE_PRECISION (int_mode) < subreg_prec)
849 return simplify_gen_unary (TRUNCATE, int_mode,
850 SUBREG_REG (op), subreg_mode);
852 /* Simplification of (truncate:A (subreg:B X:C 0)) where
853 A is narrower than B and B is narrower than C. */
854 else if (int_op_prec < subreg_prec
855 && GET_MODE_PRECISION (int_mode) < int_op_prec)
856 return simplify_gen_unary (TRUNCATE, int_mode,
857 SUBREG_REG (op), subreg_mode);
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
866 /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
867 in mode A. */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
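/* Example of the rule above (illustrative only):
   (truncate:QI (ior:SI X (const_int 255))) is (const_int -1), because
   255 truncated to QImode already has every QImode bit set.  */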
875 return NULL_RTX;
878 /* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
881 rtx
882 simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
885 rtx trueop, tem;
887 trueop = avoid_constant_pool_reference (op);
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
893 return simplify_unary_operation_1 (code, mode, op);
896 /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
899 static bool
900 exact_int_to_float_conversion_p (const_rtx op)
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
919 return in_bits <= out_bits;
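/* Example (illustrative only): (float:DF (reg:SI X)) is always exact,
   since any 32-bit value fits in DFmode's 53-bit significand, whereas
   (float:SF (reg:SI X)) is only known exact when enough sign-bit copies
   or trailing zero bits reduce the significant bits of X to SFmode's
   24-bit significand or fewer.  */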
922 /* Perform some simplifications we can do even if the operands
923 aren't constant. */
924 rtx
925 simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
926 rtx op)
928 enum rtx_code reversed;
929 rtx temp, elt, base, step;
930 scalar_int_mode inner, int_mode, op_mode, op0_mode;
932 switch (code)
934 case NOT:
935 /* (not (not X)) == X. */
936 if (GET_CODE (op) == NOT)
937 return XEXP (op, 0);
939 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
940 comparison is all ones. */
941 if (COMPARISON_P (op)
942 && (mode == BImode || STORE_FLAG_VALUE == -1)
943 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
944 return simplify_gen_relational (reversed, mode, VOIDmode,
945 XEXP (op, 0), XEXP (op, 1));
947 /* (not (plus X -1)) can become (neg X). */
948 if (GET_CODE (op) == PLUS
949 && XEXP (op, 1) == constm1_rtx)
950 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
952 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
953 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
954 and MODE_VECTOR_INT. */
955 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
956 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
957 CONSTM1_RTX (mode));
959 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
960 if (GET_CODE (op) == XOR
961 && CONST_INT_P (XEXP (op, 1))
962 && (temp = simplify_unary_operation (NOT, mode,
963 XEXP (op, 1), mode)) != 0)
964 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
966 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
967 if (GET_CODE (op) == PLUS
968 && CONST_INT_P (XEXP (op, 1))
969 && mode_signbit_p (mode, XEXP (op, 1))
970 && (temp = simplify_unary_operation (NOT, mode,
971 XEXP (op, 1), mode)) != 0)
972 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
975 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
976 operands other than 1, but that is not valid. We could do a
977 similar simplification for (not (lshiftrt C X)) where C is
978 just the sign bit, but this doesn't seem common enough to
979 bother with. */
980 if (GET_CODE (op) == ASHIFT
981 && XEXP (op, 0) == const1_rtx)
983 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
984 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
987 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
988 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
989 so we can perform the above simplification. */
990 if (STORE_FLAG_VALUE == -1
991 && is_a <scalar_int_mode> (mode, &int_mode)
992 && GET_CODE (op) == ASHIFTRT
993 && CONST_INT_P (XEXP (op, 1))
994 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
995 return simplify_gen_relational (GE, int_mode, VOIDmode,
996 XEXP (op, 0), const0_rtx);
999 if (partial_subreg_p (op)
1000 && subreg_lowpart_p (op)
1001 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1002 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1004 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1005 rtx x;
1007 x = gen_rtx_ROTATE (inner_mode,
1008 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1009 inner_mode),
1010 XEXP (SUBREG_REG (op), 1));
1011 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1012 if (temp)
1013 return temp;
1016 /* Apply De Morgan's laws to reduce number of patterns for machines
1017 with negating logical insns (and-not, nand, etc.). If result has
1018 only one NOT, put it first, since that is how the patterns are
1019 coded. */
1020 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1022 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1023 machine_mode op_mode;
1025 op_mode = GET_MODE (in1);
1026 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1028 op_mode = GET_MODE (in2);
1029 if (op_mode == VOIDmode)
1030 op_mode = mode;
1031 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1033 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1034 std::swap (in1, in2);
1036 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1037 mode, in1, in2);
1040 /* (not (bswap x)) -> (bswap (not x)). */
1041 if (GET_CODE (op) == BSWAP)
1043 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1044 return simplify_gen_unary (BSWAP, mode, x, mode);
1046 break;
1048 case NEG:
1049 /* (neg (neg X)) == X. */
1050 if (GET_CODE (op) == NEG)
1051 return XEXP (op, 0);
1053 /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1054 If comparison is not reversible use
1055 x ? y : (neg y). */
1056 if (GET_CODE (op) == IF_THEN_ELSE)
1058 rtx cond = XEXP (op, 0);
1059 rtx true_rtx = XEXP (op, 1);
1060 rtx false_rtx = XEXP (op, 2);
1062 if ((GET_CODE (true_rtx) == NEG
1063 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1064 || (GET_CODE (false_rtx) == NEG
1065 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1067 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1068 temp = reversed_comparison (cond, mode);
1069 else
1071 temp = cond;
1072 std::swap (true_rtx, false_rtx);
1074 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1075 mode, temp, true_rtx, false_rtx);
1079 /* (neg (plus X 1)) can become (not X). */
1080 if (GET_CODE (op) == PLUS
1081 && XEXP (op, 1) == const1_rtx)
1082 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1084 /* Similarly, (neg (not X)) is (plus X 1). */
1085 if (GET_CODE (op) == NOT)
1086 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1087 CONST1_RTX (mode));
1089 /* (neg (minus X Y)) can become (minus Y X). This transformation
1090 isn't safe for modes with signed zeros, since if X and Y are
1091 both +0, (minus Y X) is the same as (minus X Y). If the
1092 rounding mode is towards +infinity (or -infinity) then the two
1093 expressions will be rounded differently. */
1094 if (GET_CODE (op) == MINUS
1095 && !HONOR_SIGNED_ZEROS (mode)
1096 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1097 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
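/* Worked example of the signed-zero hazard above (illustrative only):
   with X = Y = +0.0, (minus X Y) is +0.0 and its negation is -0.0,
   but (minus Y X) is +0.0, so the rewrite is only valid when signed
   zeros and sign-dependent rounding can be ignored.  */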
1099 if (GET_CODE (op) == PLUS
1100 && !HONOR_SIGNED_ZEROS (mode)
1101 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1103 /* (neg (plus A C)) is simplified to (minus -C A). */
1104 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1105 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1107 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1108 if (temp)
1109 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1112 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1113 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1114 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1117 /* (neg (mult A B)) becomes (mult A (neg B)).
1118 This works even for floating-point values. */
1119 if (GET_CODE (op) == MULT
1120 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1122 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1123 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1126 /* NEG commutes with ASHIFT since it is multiplication. Only do
1127 this if we can then eliminate the NEG (e.g., if the operand
1128 is a constant). */
1129 if (GET_CODE (op) == ASHIFT)
1131 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1132 if (temp)
1133 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1136 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1137 C is equal to the width of MODE minus 1. */
1138 if (GET_CODE (op) == ASHIFTRT
1139 && CONST_INT_P (XEXP (op, 1))
1140 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1141 return simplify_gen_binary (LSHIFTRT, mode,
1142 XEXP (op, 0), XEXP (op, 1));
1144 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1145 C is equal to the width of MODE minus 1. */
1146 if (GET_CODE (op) == LSHIFTRT
1147 && CONST_INT_P (XEXP (op, 1))
1148 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1149 return simplify_gen_binary (ASHIFTRT, mode,
1150 XEXP (op, 0), XEXP (op, 1));
1152 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1153 if (GET_CODE (op) == XOR
1154 && XEXP (op, 1) == const1_rtx
1155 && nonzero_bits (XEXP (op, 0), mode) == 1)
1156 return plus_constant (mode, XEXP (op, 0), -1);
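/* Quick check of the rule above (illustrative only): if A is 0 then
   (neg (xor 0 1)) is -1, matching (plus 0 -1); if A is 1 then
   (neg (xor 1 1)) is 0, matching (plus 1 -1).  The two forms agree
   exactly when A is known to be 0 or 1.  */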
1158 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1159 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1160 if (GET_CODE (op) == LT
1161 && XEXP (op, 1) == const0_rtx
1162 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1164 int_mode = as_a <scalar_int_mode> (mode);
1165 int isize = GET_MODE_PRECISION (inner);
1166 if (STORE_FLAG_VALUE == 1)
1168 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1169 gen_int_shift_amount (inner,
1170 isize - 1));
1171 if (int_mode == inner)
1172 return temp;
1173 if (GET_MODE_PRECISION (int_mode) > isize)
1174 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1175 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1177 else if (STORE_FLAG_VALUE == -1)
1179 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1180 gen_int_shift_amount (inner,
1181 isize - 1));
1182 if (int_mode == inner)
1183 return temp;
1184 if (GET_MODE_PRECISION (int_mode) > isize)
1185 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1186 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1190 if (vec_series_p (op, &base, &step))
1192 /* Only create a new series if we can simplify both parts. In other
1193 cases this isn't really a simplification, and it's not necessarily
1194 a win to replace a vector operation with a scalar operation. */
1195 scalar_mode inner_mode = GET_MODE_INNER (mode);
1196 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1197 if (base)
1199 step = simplify_unary_operation (NEG, inner_mode,
1200 step, inner_mode);
1201 if (step)
1202 return gen_vec_series (mode, base, step);
1205 break;
1207 case TRUNCATE:
1208 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1209 with the umulXi3_highpart patterns. */
1210 if (GET_CODE (op) == LSHIFTRT
1211 && GET_CODE (XEXP (op, 0)) == MULT)
1212 break;
1214 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1216 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1218 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1219 if (temp)
1220 return temp;
1222 /* We can't handle truncation to a partial integer mode here
1223 because we don't know the real bitsize of the partial
1224 integer mode. */
1225 break;
1228 if (GET_MODE (op) != VOIDmode)
1230 temp = simplify_truncation (mode, op, GET_MODE (op));
1231 if (temp)
1232 return temp;
1235 /* If we know that the value is already truncated, we can
1236 replace the TRUNCATE with a SUBREG. */
1237 if (known_eq (GET_MODE_NUNITS (mode), 1)
1238 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1239 || truncated_to_mode (mode, op)))
1241 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1242 if (temp)
1243 return temp;
1246 /* A truncate of a comparison can be replaced with a subreg if
1247 STORE_FLAG_VALUE permits. This is like the previous test,
1248 but it works even if the comparison is done in a mode larger
1249 than HOST_BITS_PER_WIDE_INT. */
1250 if (HWI_COMPUTABLE_MODE_P (mode)
1251 && COMPARISON_P (op)
1252 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
1253 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1255 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1256 if (temp)
1257 return temp;
1260 /* A truncate of a memory is just loading the low part of the memory
1261 if we are not changing the meaning of the address. */
1262 if (GET_CODE (op) == MEM
1263 && !VECTOR_MODE_P (mode)
1264 && !MEM_VOLATILE_P (op)
1265 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1267 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1268 if (temp)
1269 return temp;
1272 /* Check for useless truncation. */
1273 if (GET_MODE (op) == mode)
1274 return op;
1275 break;
1277 case FLOAT_TRUNCATE:
1278 /* Check for useless truncation. */
1279 if (GET_MODE (op) == mode)
1280 return op;
1282 if (DECIMAL_FLOAT_MODE_P (mode))
1283 break;
1285 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1286 if (GET_CODE (op) == FLOAT_EXTEND
1287 && GET_MODE (XEXP (op, 0)) == mode)
1288 return XEXP (op, 0);
1290 /* (float_truncate:SF (float_truncate:DF foo:XF))
1291 = (float_truncate:SF foo:XF).
1292 This may eliminate double rounding, so it is unsafe.
1294 (float_truncate:SF (float_extend:XF foo:DF))
1295 = (float_truncate:SF foo:DF).
1297 (float_truncate:DF (float_extend:XF foo:SF))
1298 = (float_extend:DF foo:SF). */
1299 if ((GET_CODE (op) == FLOAT_TRUNCATE
1300 && flag_unsafe_math_optimizations)
1301 || GET_CODE (op) == FLOAT_EXTEND)
1302 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1303 > GET_MODE_UNIT_SIZE (mode)
1304 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1305 mode,
1306 XEXP (op, 0), mode);
1308 /* (float_truncate (float x)) is (float x) */
1309 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1310 && (flag_unsafe_math_optimizations
1311 || exact_int_to_float_conversion_p (op)))
1312 return simplify_gen_unary (GET_CODE (op), mode,
1313 XEXP (op, 0),
1314 GET_MODE (XEXP (op, 0)));
1316 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1317 (OP:SF foo:SF) if OP is NEG or ABS. */
1318 if ((GET_CODE (op) == ABS
1319 || GET_CODE (op) == NEG)
1320 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1321 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1322 return simplify_gen_unary (GET_CODE (op), mode,
1323 XEXP (XEXP (op, 0), 0), mode);
1325 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1326 is (float_truncate:SF x). */
1327 if (GET_CODE (op) == SUBREG
1328 && subreg_lowpart_p (op)
1329 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1330 return SUBREG_REG (op);
1331 break;
1333 case FLOAT_EXTEND:
1334 /* Check for useless extension. */
1335 if (GET_MODE (op) == mode)
1336 return op;
1338 if (DECIMAL_FLOAT_MODE_P (mode))
1339 break;
1341 /* (float_extend (float_extend x)) is (float_extend x)
1343 (float_extend (float x)) is (float x) assuming that double
1344 rounding can't happen.  */
1346 if (GET_CODE (op) == FLOAT_EXTEND
1347 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1348 && exact_int_to_float_conversion_p (op)))
1349 return simplify_gen_unary (GET_CODE (op), mode,
1350 XEXP (op, 0),
1351 GET_MODE (XEXP (op, 0)));
1353 break;
1355 case ABS:
1356 /* (abs (neg <foo>)) -> (abs <foo>) */
1357 if (GET_CODE (op) == NEG)
1358 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1359 GET_MODE (XEXP (op, 0)));
1361 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1362 do nothing. */
1363 if (GET_MODE (op) == VOIDmode)
1364 break;
1366 /* If operand is something known to be positive, ignore the ABS. */
1367 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1368 || val_signbit_known_clear_p (GET_MODE (op),
1369 nonzero_bits (op, GET_MODE (op))))
1370 return op;
1372 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1373 if (is_a <scalar_int_mode> (mode, &int_mode)
1374 && (num_sign_bit_copies (op, int_mode)
1375 == GET_MODE_PRECISION (int_mode)))
1376 return gen_rtx_NEG (int_mode, op);
1378 break;
1380 case FFS:
1381 /* (ffs (*_extend <X>)) = (ffs <X>) */
1382 if (GET_CODE (op) == SIGN_EXTEND
1383 || GET_CODE (op) == ZERO_EXTEND)
1384 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1385 GET_MODE (XEXP (op, 0)));
1386 break;
1388 case POPCOUNT:
1389 switch (GET_CODE (op))
1391 case BSWAP:
1392 case ZERO_EXTEND:
1393 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1394 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1395 GET_MODE (XEXP (op, 0)));
1397 case ROTATE:
1398 case ROTATERT:
1399 /* Rotations don't affect popcount. */
1400 if (!side_effects_p (XEXP (op, 1)))
1401 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1402 GET_MODE (XEXP (op, 0)));
1403 break;
1405 default:
1406 break;
1408 break;
1410 case PARITY:
1411 switch (GET_CODE (op))
1413 case NOT:
1414 case BSWAP:
1415 case ZERO_EXTEND:
1416 case SIGN_EXTEND:
1417 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1418 GET_MODE (XEXP (op, 0)));
1420 case ROTATE:
1421 case ROTATERT:
1422 /* Rotations don't affect parity. */
1423 if (!side_effects_p (XEXP (op, 1)))
1424 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1425 GET_MODE (XEXP (op, 0)));
1426 break;
1428 case PARITY:
1429 /* (parity (parity x)) -> parity (x). */
1430 return op;
1432 default:
1433 break;
1435 break;
1437 case BSWAP:
1438 /* (bswap (bswap x)) -> x. */
1439 if (GET_CODE (op) == BSWAP)
1440 return XEXP (op, 0);
1441 break;
1443 case FLOAT:
1444 /* (float (sign_extend <X>)) = (float <X>). */
1445 if (GET_CODE (op) == SIGN_EXTEND)
1446 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1447 GET_MODE (XEXP (op, 0)));
1448 break;
1450 case SIGN_EXTEND:
1451 /* Check for useless extension. */
1452 if (GET_MODE (op) == mode)
1453 return op;
1455 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1456 becomes just the MINUS if its mode is MODE. This allows
1457 folding switch statements on machines using casesi (such as
1458 the VAX). */
1459 if (GET_CODE (op) == TRUNCATE
1460 && GET_MODE (XEXP (op, 0)) == mode
1461 && GET_CODE (XEXP (op, 0)) == MINUS
1462 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1463 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1464 return XEXP (op, 0);
1466 /* Extending a widening multiplication should be canonicalized to
1467 a wider widening multiplication. */
1468 if (GET_CODE (op) == MULT)
1470 rtx lhs = XEXP (op, 0);
1471 rtx rhs = XEXP (op, 1);
1472 enum rtx_code lcode = GET_CODE (lhs);
1473 enum rtx_code rcode = GET_CODE (rhs);
1475 /* Widening multiplies usually extend both operands, but sometimes
1476 they use a shift to extract a portion of a register. */
1477 if ((lcode == SIGN_EXTEND
1478 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1479 && (rcode == SIGN_EXTEND
1480 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1482 machine_mode lmode = GET_MODE (lhs);
1483 machine_mode rmode = GET_MODE (rhs);
1484 int bits;
1486 if (lcode == ASHIFTRT)
1487 /* Number of bits not shifted off the end. */
1488 bits = (GET_MODE_UNIT_PRECISION (lmode)
1489 - INTVAL (XEXP (lhs, 1)));
1490 else /* lcode == SIGN_EXTEND */
1491 /* Size of inner mode. */
1492 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1494 if (rcode == ASHIFTRT)
1495 bits += (GET_MODE_UNIT_PRECISION (rmode)
1496 - INTVAL (XEXP (rhs, 1)));
1497 else /* rcode == SIGN_EXTEND */
1498 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1500 /* We can only widen multiplies if the result is mathematically
1501 equivalent. I.e. if overflow was impossible. */
1502 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1503 return simplify_gen_binary
1504 (MULT, mode,
1505 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1506 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
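/* Example (illustrative only): for
   (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
                            (sign_extend:SI (reg:HI B))))
   BITS is 16 + 16 = 32, which does not exceed SImode's precision, so
   overflow is impossible and the expression is rewritten as
   (mult:DI (sign_extend:DI (reg:HI A)) (sign_extend:DI (reg:HI B))).  */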
1510 /* Check for a sign extension of a subreg of a promoted
1511 variable, where the promotion is sign-extended, and the
1512 target mode is the same as the variable's promotion. */
1513 if (GET_CODE (op) == SUBREG
1514 && SUBREG_PROMOTED_VAR_P (op)
1515 && SUBREG_PROMOTED_SIGNED_P (op))
1517 rtx subreg = SUBREG_REG (op);
1518 machine_mode subreg_mode = GET_MODE (subreg);
1519 if (!paradoxical_subreg_p (mode, subreg_mode))
1521 temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1522 if (temp)
1524 /* Preserve SUBREG_PROMOTED_VAR_P. */
1525 if (partial_subreg_p (temp))
1527 SUBREG_PROMOTED_VAR_P (temp) = 1;
1528 SUBREG_PROMOTED_SET (temp, 1);
1530 return temp;
1533 else
1534 /* Sign-extending a sign-extended subreg. */
1535 return simplify_gen_unary (SIGN_EXTEND, mode,
1536 subreg, subreg_mode);
1539 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1540 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1541 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1543 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1544 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1545 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1546 GET_MODE (XEXP (op, 0)));
1549 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1550 is (sign_extend:M (subreg:O <X>)) if there is a mode with
1551 GET_MODE_BITSIZE (N) - I bits.
1552 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1553 is similarly (zero_extend:M (subreg:O <X>)). */
1554 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1555 && GET_CODE (XEXP (op, 0)) == ASHIFT
1556 && is_a <scalar_int_mode> (mode, &int_mode)
1557 && CONST_INT_P (XEXP (op, 1))
1558 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1559 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1560 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1562 scalar_int_mode tmode;
1563 gcc_assert (GET_MODE_PRECISION (int_mode)
1564 > GET_MODE_PRECISION (op_mode));
1565 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1566 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1568 rtx inner =
1569 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1570 if (inner)
1571 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1572 ? SIGN_EXTEND : ZERO_EXTEND,
1573 int_mode, inner, tmode);
1577 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1578 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1579 if (GET_CODE (op) == LSHIFTRT
1580 && CONST_INT_P (XEXP (op, 1))
1581 && XEXP (op, 1) != const0_rtx)
1582 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1584 /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
1585 I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
1586 (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
1587 (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
1588 O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
1589 wider than O. */
1590 if (GET_CODE (op) == TRUNCATE
1591 && GET_CODE (XEXP (op, 0)) == LSHIFTRT
1592 && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
1594 scalar_int_mode m_mode, n_mode, o_mode;
1595 rtx old_shift = XEXP (op, 0);
1596 if (is_a <scalar_int_mode> (mode, &m_mode)
1597 && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
1598 && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
1599 && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
1600 == INTVAL (XEXP (old_shift, 1)))
1602 rtx new_shift = simplify_gen_binary (ASHIFTRT,
1603 GET_MODE (old_shift),
1604 XEXP (old_shift, 0),
1605 XEXP (old_shift, 1));
1606 if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
1607 return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
1608 GET_MODE (new_shift));
1609 if (mode != GET_MODE (new_shift))
1610 return simplify_gen_unary (TRUNCATE, mode, new_shift,
1611 GET_MODE (new_shift));
1612 return new_shift;
1616 #if defined(POINTERS_EXTEND_UNSIGNED)
1617 /* As we do not know which address space the pointer is referring to,
1618 we can do this only if the target does not support different pointer
1619 or address modes depending on the address space. */
1620 if (target_default_pointer_address_modes_p ()
1621 && ! POINTERS_EXTEND_UNSIGNED
1622 && mode == Pmode && GET_MODE (op) == ptr_mode
1623 && (CONSTANT_P (op)
1624 || (GET_CODE (op) == SUBREG
1625 && REG_P (SUBREG_REG (op))
1626 && REG_POINTER (SUBREG_REG (op))
1627 && GET_MODE (SUBREG_REG (op)) == Pmode))
1628 && !targetm.have_ptr_extend ())
1630 temp
1631 = convert_memory_address_addr_space_1 (Pmode, op,
1632 ADDR_SPACE_GENERIC, false,
1633 true);
1634 if (temp)
1635 return temp;
1637 #endif
1638 break;
1640 case ZERO_EXTEND:
1641 /* Check for useless extension. */
1642 if (GET_MODE (op) == mode)
1643 return op;
1645 /* Check for a zero extension of a subreg of a promoted
1646 variable, where the promotion is zero-extended, and the
1647 target mode is the same as the variable's promotion. */
1648 if (GET_CODE (op) == SUBREG
1649 && SUBREG_PROMOTED_VAR_P (op)
1650 && SUBREG_PROMOTED_UNSIGNED_P (op))
1652 rtx subreg = SUBREG_REG (op);
1653 machine_mode subreg_mode = GET_MODE (subreg);
1654 if (!paradoxical_subreg_p (mode, subreg_mode))
1656 temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1657 if (temp)
1659 /* Preserve SUBREG_PROMOTED_VAR_P. */
1660 if (partial_subreg_p (temp))
1662 SUBREG_PROMOTED_VAR_P (temp) = 1;
1663 SUBREG_PROMOTED_SET (temp, 0);
1665 return temp;
1668 else
1669 /* Zero-extending a zero-extended subreg. */
1670 return simplify_gen_unary (ZERO_EXTEND, mode,
1671 subreg, subreg_mode);
1674 /* Extending a widening multiplication should be canonicalized to
1675 a wider widening multiplication. */
1676 if (GET_CODE (op) == MULT)
1678 rtx lhs = XEXP (op, 0);
1679 rtx rhs = XEXP (op, 1);
1680 enum rtx_code lcode = GET_CODE (lhs);
1681 enum rtx_code rcode = GET_CODE (rhs);
1683 /* Widening multiplies usually extend both operands, but sometimes
1684 they use a shift to extract a portion of a register. */
1685 if ((lcode == ZERO_EXTEND
1686 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1687 && (rcode == ZERO_EXTEND
1688 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1690 machine_mode lmode = GET_MODE (lhs);
1691 machine_mode rmode = GET_MODE (rhs);
1692 int bits;
1694 if (lcode == LSHIFTRT)
1695 /* Number of bits not shifted off the end. */
1696 bits = (GET_MODE_UNIT_PRECISION (lmode)
1697 - INTVAL (XEXP (lhs, 1)));
1698 else /* lcode == ZERO_EXTEND */
1699 /* Size of inner mode. */
1700 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1702 if (rcode == LSHIFTRT)
1703 bits += (GET_MODE_UNIT_PRECISION (rmode)
1704 - INTVAL (XEXP (rhs, 1)));
1705 else /* rcode == ZERO_EXTEND */
1706 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1708 /* We can only widen multiplies if the result is mathematically
1709 equivalent. I.e. if overflow was impossible. */
1710 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1711 return simplify_gen_binary
1712 (MULT, mode,
1713 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1714 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1718 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1719 if (GET_CODE (op) == ZERO_EXTEND)
1720 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1721 GET_MODE (XEXP (op, 0)));
1723 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1724 is (zero_extend:M (subreg:O <X>)) if there is a mode with
1725 GET_MODE_PRECISION (N) - I bits. */
1726 if (GET_CODE (op) == LSHIFTRT
1727 && GET_CODE (XEXP (op, 0)) == ASHIFT
1728 && is_a <scalar_int_mode> (mode, &int_mode)
1729 && CONST_INT_P (XEXP (op, 1))
1730 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1731 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1732 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1734 scalar_int_mode tmode;
1735 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1736 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1738 rtx inner =
1739 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1740 if (inner)
1741 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1742 inner, tmode);
1746 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1747 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1748 of mode N. E.g.
1749 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1750 (and:SI (reg:SI) (const_int 63)). */
1751 if (partial_subreg_p (op)
1752 && is_a <scalar_int_mode> (mode, &int_mode)
1753 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1754 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1755 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1756 && subreg_lowpart_p (op)
1757 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1758 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1760 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1761 return SUBREG_REG (op);
1762 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1763 op0_mode);
1766 #if defined(POINTERS_EXTEND_UNSIGNED)
1767 /* As we do not know which address space the pointer is referring to,
1768 we can do this only if the target does not support different pointer
1769 or address modes depending on the address space. */
1770 if (target_default_pointer_address_modes_p ()
1771 && POINTERS_EXTEND_UNSIGNED > 0
1772 && mode == Pmode && GET_MODE (op) == ptr_mode
1773 && (CONSTANT_P (op)
1774 || (GET_CODE (op) == SUBREG
1775 && REG_P (SUBREG_REG (op))
1776 && REG_POINTER (SUBREG_REG (op))
1777 && GET_MODE (SUBREG_REG (op)) == Pmode))
1778 && !targetm.have_ptr_extend ())
1780 temp
1781 = convert_memory_address_addr_space_1 (Pmode, op,
1782 ADDR_SPACE_GENERIC, false,
1783 true);
1784 if (temp)
1785 return temp;
1787 #endif
1788 break;
1790 default:
1791 break;
1794 if (VECTOR_MODE_P (mode)
1795 && vec_duplicate_p (op, &elt)
1796 && code != VEC_DUPLICATE)
1798 if (code == SIGN_EXTEND || code == ZERO_EXTEND)
1799 /* Enforce a canonical order of VEC_DUPLICATE wrt other unary
1800 operations by promoting VEC_DUPLICATE to the root of the expression
1801 (as far as possible). */
1802 temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
1803 elt, GET_MODE_INNER (GET_MODE (op)));
1804 else
1805 /* Try applying the operator to ELT and see if that simplifies.
1806 We can duplicate the result if so.
1808 The reason we traditionally haven't used simplify_gen_unary
1809 for these codes is that it didn't necessarily seem to be a
1810 win to convert things like:
1812 (neg:V (vec_duplicate:V (reg:S R)))
1816 (vec_duplicate:V (neg:S (reg:S R)))
1818 The first might be done entirely in vector registers while the
1819 second might need a move between register files.
1821 However, there are also cases where promoting the vec_duplicate is
1822 more efficient, and there is definite value in having a canonical
1823 form when matching instruction patterns. We should consider
1824 extending the simplify_gen_unary code above to more cases. */
1825 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1826 elt, GET_MODE_INNER (GET_MODE (op)));
1827 if (temp)
1828 return gen_vec_duplicate (mode, temp);
1831 return 0;
1834 /* Try to compute the value of a unary operation CODE whose output mode is to
1835 be MODE with input operand OP whose mode was originally OP_MODE.
1836 Return zero if the value cannot be computed. */
1838 simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1839 rtx op, machine_mode op_mode)
1841 scalar_int_mode result_mode;
1843 if (code == VEC_DUPLICATE)
1845 gcc_assert (VECTOR_MODE_P (mode));
1846 if (GET_MODE (op) != VOIDmode)
1848 if (!VECTOR_MODE_P (GET_MODE (op)))
1849 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1850 else
1851 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1852 (GET_MODE (op)));
1854 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1855 return gen_const_vec_duplicate (mode, op);
1856 if (GET_CODE (op) == CONST_VECTOR
1857 && (CONST_VECTOR_DUPLICATE_P (op)
1858 || CONST_VECTOR_NUNITS (op).is_constant ()))
1860 unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1861 ? CONST_VECTOR_NPATTERNS (op)
1862 : CONST_VECTOR_NUNITS (op).to_constant ());
1863 gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
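/* Rebuild the constant in the wider vector mode: with NPATTERNS
   patterns of one element each, the builder repeats OP's leading
   elements cyclically across all of MODE's elements.  */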
1864 rtx_vector_builder builder (mode, npatterns, 1);
1865 for (unsigned i = 0; i < npatterns; i++)
1866 builder.quick_push (CONST_VECTOR_ELT (op, i));
1867 return builder.build ();
1871 if (VECTOR_MODE_P (mode)
1872 && GET_CODE (op) == CONST_VECTOR
1873 && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1875 gcc_assert (GET_MODE (op) == op_mode);
1877 rtx_vector_builder builder;
1878 if (!builder.new_unary_operation (mode, op, false))
1879 return 0;
1881 unsigned int count = builder.encoded_nelts ();
1882 for (unsigned int i = 0; i < count; i++)
1884 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1885 CONST_VECTOR_ELT (op, i),
1886 GET_MODE_INNER (op_mode));
1887 if (!x || !valid_for_const_vector_p (mode, x))
1888 return 0;
1889 builder.quick_push (x);
1891 return builder.build ();
1894 /* The order of these tests is critical so that, for example, we don't
1895 check the wrong mode (input vs. output) for a conversion operation,
1896 such as FIX. At some point, this should be simplified. */
1898 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1900 REAL_VALUE_TYPE d;
1902 if (op_mode == VOIDmode)
1904 /* CONST_INTs have VOIDmode as their mode.  We assume that all
1905 the bits of the constant are significant, though this is a
1906 dangerous assumption: CONST_INTs are often created and used
1907 with garbage in the bits outside the precision of the
1908 implied mode of the const_int.  */
1909 op_mode = MAX_MODE_INT;
1912 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1914 /* Avoid the folding if flag_signaling_nans is on and
1915 operand is a signaling NaN. */
1916 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1917 return 0;
1919 d = real_value_truncate (mode, d);
1920 return const_double_from_real_value (d, mode);
1922 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1924 REAL_VALUE_TYPE d;
1926 if (op_mode == VOIDmode)
1928 /* CONST_INTs have VOIDmode as their mode.  We assume that all
1929 the bits of the constant are significant, though this is a
1930 dangerous assumption: CONST_INTs are often created and used
1931 with garbage in the bits outside the precision of the
1932 implied mode of the const_int.  */
1933 op_mode = MAX_MODE_INT;
1936 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1938 /* Avoid the folding if flag_signaling_nans is on and
1939 operand is a signaling NaN. */
1940 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1941 return 0;
1943 d = real_value_truncate (mode, d);
1944 return const_double_from_real_value (d, mode);
1947 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1949 unsigned int width = GET_MODE_PRECISION (result_mode);
1950 if (width > MAX_BITSIZE_MODE_ANY_INT)
1951 return 0;
1953 wide_int result;
1954 scalar_int_mode imode = (op_mode == VOIDmode
1955 ? result_mode
1956 : as_a <scalar_int_mode> (op_mode));
1957 rtx_mode_t op0 = rtx_mode_t (op, imode);
1958 int int_value;
1960 #if TARGET_SUPPORTS_WIDE_INT == 0
1961 /* This assert keeps the simplification from producing a result
1962 that cannot be represented in a CONST_DOUBLE, but a lot of
1963 upstream callers expect that this function never fails to
1964 simplify something, so if you added this to the test
1965 above, the code would die later anyway.  If this assert
1966 fires, you just need to make the port support wide int.  */
1967 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1968 #endif
1970 switch (code)
1972 case NOT:
1973 result = wi::bit_not (op0);
1974 break;
1976 case NEG:
1977 result = wi::neg (op0);
1978 break;
1980 case ABS:
1981 result = wi::abs (op0);
1982 break;
1984 case FFS:
1985 result = wi::shwi (wi::ffs (op0), result_mode);
1986 break;
1988 case CLZ:
1989 if (wi::ne_p (op0, 0))
1990 int_value = wi::clz (op0);
1991 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1992 return NULL_RTX;
1993 result = wi::shwi (int_value, result_mode);
1994 break;
1996 case CLRSB:
1997 result = wi::shwi (wi::clrsb (op0), result_mode);
1998 break;
2000 case CTZ:
2001 if (wi::ne_p (op0, 0))
2002 int_value = wi::ctz (op0);
2003 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2004 return NULL_RTX;
2005 result = wi::shwi (int_value, result_mode);
2006 break;
2008 case POPCOUNT:
2009 result = wi::shwi (wi::popcount (op0), result_mode);
2010 break;
2012 case PARITY:
2013 result = wi::shwi (wi::parity (op0), result_mode);
2014 break;
2016 case BSWAP:
2017 result = wide_int (op0).bswap ();
2018 break;
2020 case TRUNCATE:
2021 case ZERO_EXTEND:
2022 result = wide_int::from (op0, width, UNSIGNED);
2023 break;
2025 case SIGN_EXTEND:
2026 result = wide_int::from (op0, width, SIGNED);
2027 break;
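/* For the saturating codes below, negating or taking the absolute
   value of the most negative value would overflow, so the result
   saturates to the maximum signed value instead.  */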
2029 case SS_NEG:
2030 if (wi::only_sign_bit_p (op0))
2031 result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2032 else
2033 result = wi::neg (op0);
2034 break;
2036 case SS_ABS:
2037 if (wi::only_sign_bit_p (op0))
2038 result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2039 else
2040 result = wi::abs (op0);
2041 break;
2043 case SQRT:
2044 default:
2045 return 0;
2048 return immed_wide_int_const (result, result_mode);
2051 else if (CONST_DOUBLE_AS_FLOAT_P (op)
2052 && SCALAR_FLOAT_MODE_P (mode)
2053 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
2055 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
2056 switch (code)
2058 case SQRT:
2059 return 0;
2060 case ABS:
2061 d = real_value_abs (&d);
2062 break;
2063 case NEG:
2064 d = real_value_negate (&d);
2065 break;
2066 case FLOAT_TRUNCATE:
2067 /* Don't perform the operation if flag_signaling_nans is on
2068 and the operand is a signaling NaN. */
2069 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2070 return NULL_RTX;
2071 d = real_value_truncate (mode, d);
2072 break;
2073 case FLOAT_EXTEND:
2074 /* Don't perform the operation if flag_signaling_nans is on
2075 and the operand is a signaling NaN. */
2076 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2077 return NULL_RTX;
2078 /* All this does is change the mode, unless we are changing
2079 the mode class, in which case a real conversion is needed.  */
2080 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
2081 real_convert (&d, mode, &d);
2082 break;
2083 case FIX:
2084 /* Don't perform the operation if flag_signaling_nans is on
2085 and the operand is a signaling NaN. */
2086 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2087 return NULL_RTX;
2088 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
2089 break;
2090 case NOT:
2092 long tmp[4];
2093 int i;
2095 real_to_target (tmp, &d, GET_MODE (op));
2096 for (i = 0; i < 4; i++)
2097 tmp[i] = ~tmp[i];
2098 real_from_target (&d, tmp, mode);
2099 break;
2101 default:
2102 gcc_unreachable ();
2104 return const_double_from_real_value (d, mode);
2106 else if (CONST_DOUBLE_AS_FLOAT_P (op)
2107 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
2108 && is_int_mode (mode, &result_mode))
2110 unsigned int width = GET_MODE_PRECISION (result_mode);
2111 if (width > MAX_BITSIZE_MODE_ANY_INT)
2112 return 0;
2114 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2115 operators are intentionally left unspecified (to ease implementation
2116 by target backends), for consistency, this routine implements the
2117 same semantics for constant folding as used by the middle-end. */
2119 /* This was formerly used only for non-IEEE float.
2120 eggert@twinsun.com says it is safe for IEEE also. */
2121 REAL_VALUE_TYPE t;
2122 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2123 wide_int wmax, wmin;
2124 /* This is part of the ABI of real_to_integer, but we check
2125 things before making this call. */
2126 bool fail;
2128 switch (code)
2130 case FIX:
2131 if (REAL_VALUE_ISNAN (*x))
2132 return const0_rtx;
2134 /* Test against the signed upper bound. */
2135 wmax = wi::max_value (width, SIGNED);
2136 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2137 if (real_less (&t, x))
2138 return immed_wide_int_const (wmax, mode);
2140 /* Test against the signed lower bound. */
2141 wmin = wi::min_value (width, SIGNED);
2142 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2143 if (real_less (x, &t))
2144 return immed_wide_int_const (wmin, mode);
2146 return immed_wide_int_const (real_to_integer (x, &fail, width),
2147 mode);
2149 case UNSIGNED_FIX:
2150 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2151 return const0_rtx;
2153 /* Test against the unsigned upper bound. */
2154 wmax = wi::max_value (width, UNSIGNED);
2155 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2156 if (real_less (&t, x))
2157 return immed_wide_int_const (wmax, mode);
2159 return immed_wide_int_const (real_to_integer (x, &fail, width),
2160 mode);
2162 default:
2163 gcc_unreachable ();
2167 /* Handle polynomial integers. */
2168 else if (CONST_POLY_INT_P (op))
2170 poly_wide_int result;
2171 switch (code)
2173 case NEG:
2174 result = -const_poly_int_value (op);
2175 break;
2177 case NOT:
2178 result = ~const_poly_int_value (op);
2179 break;
2181 default:
2182 return NULL_RTX;
2184 return immed_wide_int_const (result, mode);
2187 return NULL_RTX;
2190 /* Subroutine of simplify_binary_operation to simplify a binary operation
2191 CODE that can commute with byte swapping, with result mode MODE and
2192 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2193 Return zero if no simplification or canonicalization is possible. */
2196 simplify_context::simplify_byte_swapping_operation (rtx_code code,
2197 machine_mode mode,
2198 rtx op0, rtx op1)
2200 rtx tem;
2202 /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2203 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2205 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2206 simplify_gen_unary (BSWAP, mode, op1, mode));
2207 return simplify_gen_unary (BSWAP, mode, tem, mode);
2210 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2211 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2213 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2214 return simplify_gen_unary (BSWAP, mode, tem, mode);
2217 return NULL_RTX;
2220 /* Subroutine of simplify_binary_operation to simplify a commutative,
2221 associative binary operation CODE with result mode MODE, operating
2222 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2223 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2224 canonicalization is possible. */
2227 simplify_context::simplify_associative_operation (rtx_code code,
2228 machine_mode mode,
2229 rtx op0, rtx op1)
2231 rtx tem;
2233 /* Linearize the operator to the left. */
2234 if (GET_CODE (op1) == code)
2236 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2237 if (GET_CODE (op0) == code)
2239 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2240 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2243 /* "a op (b op c)" becomes "(b op c) op a". */
2244 if (! swap_commutative_operands_p (op1, op0))
2245 return simplify_gen_binary (code, mode, op1, op0);
2247 std::swap (op0, op1);
2250 if (GET_CODE (op0) == code)
2252 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2253 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2255 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2256 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2259 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2260 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2261 if (tem != 0)
2262 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2264 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2265 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2266 if (tem != 0)
2267 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2270 return 0;
2273 /* Return a mask describing the COMPARISON. */
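/* The mask uses one bit per possible outcome of the comparison:
   bit 3 = LT, bit 2 = GT, bit 1 = EQ, bit 0 = UNORDERED, so e.g.
   LE maps to LT|EQ (10) and NE to LT|GT|UNORDERED (13).  */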
2274 static int
2275 comparison_to_mask (enum rtx_code comparison)
2277 switch (comparison)
2279 case LT:
2280 return 8;
2281 case GT:
2282 return 4;
2283 case EQ:
2284 return 2;
2285 case UNORDERED:
2286 return 1;
2288 case LTGT:
2289 return 12;
2290 case LE:
2291 return 10;
2292 case GE:
2293 return 6;
2294 case UNLT:
2295 return 9;
2296 case UNGT:
2297 return 5;
2298 case UNEQ:
2299 return 3;
2301 case ORDERED:
2302 return 14;
2303 case NE:
2304 return 13;
2305 case UNLE:
2306 return 11;
2307 case UNGE:
2308 return 7;
2310 default:
2311 gcc_unreachable ();
2315 /* Return a comparison corresponding to the MASK. */
2316 static enum rtx_code
2317 mask_to_comparison (int mask)
2319 switch (mask)
2321 case 8:
2322 return LT;
2323 case 4:
2324 return GT;
2325 case 2:
2326 return EQ;
2327 case 1:
2328 return UNORDERED;
2330 case 12:
2331 return LTGT;
2332 case 10:
2333 return LE;
2334 case 6:
2335 return GE;
2336 case 9:
2337 return UNLT;
2338 case 5:
2339 return UNGT;
2340 case 3:
2341 return UNEQ;
2343 case 14:
2344 return ORDERED;
2345 case 13:
2346 return NE;
2347 case 11:
2348 return UNLE;
2349 case 7:
2350 return UNGE;
2352 default:
2353 gcc_unreachable ();
2357 /* Return true if CODE is valid for comparisons of mode MODE, false
2358 otherwise.
2360 It is always safe to return false, even if the code was valid for the
2361 given mode as that will merely suppress optimizations. */
2363 static bool
2364 comparison_code_valid_for_mode (enum rtx_code code, enum machine_mode mode)
2366 switch (code)
2368 /* These are valid for integral, floating and vector modes. */
2369 case NE:
2370 case EQ:
2371 case GE:
2372 case GT:
2373 case LE:
2374 case LT:
2375 return (INTEGRAL_MODE_P (mode)
2376 || FLOAT_MODE_P (mode)
2377 || VECTOR_MODE_P (mode));
2379 /* These are valid for floating point modes. */
2380 case LTGT:
2381 case UNORDERED:
2382 case ORDERED:
2383 case UNEQ:
2384 case UNGE:
2385 case UNGT:
2386 case UNLE:
2387 case UNLT:
2388 return FLOAT_MODE_P (mode);
2390 /* These are filtered out in simplify_logical_operation, but
2391 we check for them too as a matter of safety. They are valid
2392 for integral and vector modes. */
2393 case GEU:
2394 case GTU:
2395 case LEU:
2396 case LTU:
2397 return INTEGRAL_MODE_P (mode) || VECTOR_MODE_P (mode);
2399 default:
2400 gcc_unreachable ();
2404 /* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
2405 false/true value of comparison with MODE where comparison operands
2406 have CMP_MODE. */
2408 static rtx
2409 relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
2411 if (SCALAR_FLOAT_MODE_P (mode))
2413 if (res == const0_rtx)
2414 return CONST0_RTX (mode);
2415 #ifdef FLOAT_STORE_FLAG_VALUE
2416 REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
2417 return const_double_from_real_value (val, mode);
2418 #else
2419 return NULL_RTX;
2420 #endif
2422 if (VECTOR_MODE_P (mode))
2424 if (res == const0_rtx)
2425 return CONST0_RTX (mode);
2426 #ifdef VECTOR_STORE_FLAG_VALUE
2427 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2428 if (val == NULL_RTX)
2429 return NULL_RTX;
2430 if (val == const1_rtx)
2431 return CONST1_RTX (mode);
2433 return gen_const_vec_duplicate (mode, val);
2434 #else
2435 return NULL_RTX;
2436 #endif
2438 /* For a vector comparison with a scalar int result, it is unknown
2439 whether the target means a comparison into an integral bitmask,
2440 a comparison where all element comparisons being true makes the
2441 whole result const_true_rtx, or one where any element comparison
2442 being true does.  For const0_rtx all the cases are the same.  */
2443 if (VECTOR_MODE_P (cmp_mode)
2444 && SCALAR_INT_MODE_P (mode)
2445 && res == const_true_rtx)
2446 return NULL_RTX;
2448 return res;
2451 /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2452 and OP1, which should be both relational operations. Return 0 if no such
2453 simplification is possible. */
2455 simplify_context::simplify_logical_relational_operation (rtx_code code,
2456 machine_mode mode,
2457 rtx op0, rtx op1)
2459 /* We only handle IOR of two relational operations. */
2460 if (code != IOR)
2461 return 0;
2463 if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2464 return 0;
2466 if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2467 && rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2468 return 0;
2470 enum rtx_code code0 = GET_CODE (op0);
2471 enum rtx_code code1 = GET_CODE (op1);
2473 /* We don't handle unsigned comparisons currently. */
2474 if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
2475 return 0;
2476 if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
2477 return 0;
2479 int mask0 = comparison_to_mask (code0);
2480 int mask1 = comparison_to_mask (code1);
2482 int mask = mask0 | mask1;
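/* The IOR of the two comparisons accepts the union of the outcomes
   each one accepts; a mask of 15 covers LT, GT, EQ and UNORDERED,
   i.e. the combined test is always true.  */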
2484 if (mask == 15)
2485 return relational_result (mode, GET_MODE (op0), const_true_rtx);
2487 code = mask_to_comparison (mask);
2489 /* Many comparison codes are only valid for certain mode classes. */
2490 if (!comparison_code_valid_for_mode (code, mode))
2491 return 0;
2493 op0 = XEXP (op1, 0);
2494 op1 = XEXP (op1, 1);
2496 return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
2499 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2500 and OP1. Return 0 if no simplification is possible.
2502 Don't use this for relational operations such as EQ or LT.
2503 Use simplify_relational_operation instead. */
2505 simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
2506 rtx op0, rtx op1)
2508 rtx trueop0, trueop1;
2509 rtx tem;
2511 /* Relational operations don't work here. We must know the mode
2512 of the operands in order to do the comparison correctly.
2513 Assuming a full word can give incorrect results.
2514 Consider comparing 128 with -128 in QImode. */
2515 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2516 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2518 /* Make sure the constant is second. */
2519 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2520 && swap_commutative_operands_p (op0, op1))
2521 std::swap (op0, op1);
2523 trueop0 = avoid_constant_pool_reference (op0);
2524 trueop1 = avoid_constant_pool_reference (op1);
2526 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2527 if (tem)
2528 return tem;
2529 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2531 if (tem)
2532 return tem;
2534 /* If the above steps did not result in a simplification and op0 or op1
2535 were constant pool references, use the referenced constants directly. */
2536 if (trueop0 != op0 || trueop1 != op1)
2537 return simplify_gen_binary (code, mode, trueop0, trueop1);
2539 return NULL_RTX;
2542 /* Subroutine of simplify_binary_operation_1 that looks for cases in
2543 which OP0 and OP1 are both vector series or vector duplicates
2544 (which are really just series with a step of 0). If so, try to
2545 form a new series by applying CODE to the bases and to the steps.
2546 Return null if no simplification is possible.
2548 MODE is the mode of the operation and is known to be a vector
2549 integer mode. */
2552 simplify_context::simplify_binary_operation_series (rtx_code code,
2553 machine_mode mode,
2554 rtx op0, rtx op1)
2556 rtx base0, step0;
2557 if (vec_duplicate_p (op0, &base0))
2558 step0 = const0_rtx;
2559 else if (!vec_series_p (op0, &base0, &step0))
2560 return NULL_RTX;
2562 rtx base1, step1;
2563 if (vec_duplicate_p (op1, &base1))
2564 step1 = const0_rtx;
2565 else if (!vec_series_p (op1, &base1, &step1))
2566 return NULL_RTX;
2568 /* Only create a new series if we can simplify both parts. In other
2569 cases this isn't really a simplification, and it's not necessarily
2570 a win to replace a vector operation with a scalar operation. */
2571 scalar_mode inner_mode = GET_MODE_INNER (mode);
2572 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2573 if (!new_base)
2574 return NULL_RTX;
2576 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2577 if (!new_step)
2578 return NULL_RTX;
2580 return gen_vec_series (mode, new_base, new_step);
2583 /* Subroutine of simplify_binary_operation_1. Un-distribute a binary
2584 operation CODE with result mode MODE, operating on OP0 and OP1.
2585 e.g. simplify (xor (and A C) (and B C)) to (and (xor A B) C).
2586 Returns NULL_RTX if no simplification is possible. */
2589 simplify_context::simplify_distributive_operation (rtx_code code,
2590 machine_mode mode,
2591 rtx op0, rtx op1)
2593 enum rtx_code op = GET_CODE (op0);
2594 gcc_assert (GET_CODE (op1) == op);
2596 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
2597 && ! side_effects_p (XEXP (op0, 1)))
2598 return simplify_gen_binary (op, mode,
2599 simplify_gen_binary (code, mode,
2600 XEXP (op0, 0),
2601 XEXP (op1, 0)),
2602 XEXP (op0, 1));
2604 if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
2606 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2607 && ! side_effects_p (XEXP (op0, 0)))
2608 return simplify_gen_binary (op, mode,
2609 simplify_gen_binary (code, mode,
2610 XEXP (op0, 1),
2611 XEXP (op1, 1)),
2612 XEXP (op0, 0));
2613 if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
2614 && ! side_effects_p (XEXP (op0, 0)))
2615 return simplify_gen_binary (op, mode,
2616 simplify_gen_binary (code, mode,
2617 XEXP (op0, 1),
2618 XEXP (op1, 0)),
2619 XEXP (op0, 0));
2620 if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
2621 && ! side_effects_p (XEXP (op0, 1)))
2622 return simplify_gen_binary (op, mode,
2623 simplify_gen_binary (code, mode,
2624 XEXP (op0, 0),
2625 XEXP (op1, 1)),
2626 XEXP (op0, 1));
2629 return NULL_RTX;
2632 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2633 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2634 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2635 actual constants. */
2638 simplify_context::simplify_binary_operation_1 (rtx_code code,
2639 machine_mode mode,
2640 rtx op0, rtx op1,
2641 rtx trueop0, rtx trueop1)
2643 rtx tem, reversed, opleft, opright, elt0, elt1;
2644 HOST_WIDE_INT val;
2645 scalar_int_mode int_mode, inner_mode;
2646 poly_int64 offset;
2648 /* Even if we can't compute a constant result,
2649 there are some cases worth simplifying. */
2651 switch (code)
2653 case PLUS:
2654 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2655 when x is NaN, infinite, or finite and nonzero. They aren't
2656 when x is -0 and the rounding mode is not towards -infinity,
2657 since (-0) + 0 is then 0. */
2658 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2659 return op0;
2661 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2662 transformations are safe even for IEEE. */
2663 if (GET_CODE (op0) == NEG)
2664 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2665 else if (GET_CODE (op1) == NEG)
2666 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2668 /* (~a) + 1 -> -a */
2669 if (INTEGRAL_MODE_P (mode)
2670 && GET_CODE (op0) == NOT
2671 && trueop1 == const1_rtx)
2672 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2674 /* Handle both-operands-constant cases. We can only add
2675 CONST_INTs to constants since the sum of relocatable symbols
2676 can't be handled by most assemblers. Don't add CONST_INT
2677 to CONST_INT since overflow won't be computed properly if wider
2678 than HOST_BITS_PER_WIDE_INT. */
2680 if ((GET_CODE (op0) == CONST
2681 || GET_CODE (op0) == SYMBOL_REF
2682 || GET_CODE (op0) == LABEL_REF)
2683 && poly_int_rtx_p (op1, &offset))
2684 return plus_constant (mode, op0, offset);
2685 else if ((GET_CODE (op1) == CONST
2686 || GET_CODE (op1) == SYMBOL_REF
2687 || GET_CODE (op1) == LABEL_REF)
2688 && poly_int_rtx_p (op0, &offset))
2689 return plus_constant (mode, op1, offset);
2691 /* See if this is something like X * C - X or vice versa or
2692 if the multiplication is written as a shift. If so, we can
2693 distribute and make a new multiply, shift, or maybe just
2694 have X (if C is 2 in the example above). But don't make
2695 something more expensive than we had before. */
2697 if (is_a <scalar_int_mode> (mode, &int_mode))
2699 rtx lhs = op0, rhs = op1;
2701 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2702 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
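/* Decompose each operand as COEFF * OPERAND: a plain operand has
   coefficient 1, a NEG has -1, a MULT by a constant uses that
   constant, and an ASHIFT by N uses 2**N.  */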
2704 if (GET_CODE (lhs) == NEG)
2706 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2707 lhs = XEXP (lhs, 0);
2709 else if (GET_CODE (lhs) == MULT
2710 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2712 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2713 lhs = XEXP (lhs, 0);
2715 else if (GET_CODE (lhs) == ASHIFT
2716 && CONST_INT_P (XEXP (lhs, 1))
2717 && INTVAL (XEXP (lhs, 1)) >= 0
2718 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2720 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2721 GET_MODE_PRECISION (int_mode));
2722 lhs = XEXP (lhs, 0);
2725 if (GET_CODE (rhs) == NEG)
2727 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2728 rhs = XEXP (rhs, 0);
2730 else if (GET_CODE (rhs) == MULT
2731 && CONST_INT_P (XEXP (rhs, 1)))
2733 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2734 rhs = XEXP (rhs, 0);
2736 else if (GET_CODE (rhs) == ASHIFT
2737 && CONST_INT_P (XEXP (rhs, 1))
2738 && INTVAL (XEXP (rhs, 1)) >= 0
2739 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2741 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2742 GET_MODE_PRECISION (int_mode));
2743 rhs = XEXP (rhs, 0);
2746 if (rtx_equal_p (lhs, rhs))
2748 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2749 rtx coeff;
2750 bool speed = optimize_function_for_speed_p (cfun);
2752 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2754 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2755 return (set_src_cost (tem, int_mode, speed)
2756 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2759 /* Optimize (X - 1) * Y + Y to X * Y. */
2760 lhs = op0;
2761 rhs = op1;
2762 if (GET_CODE (op0) == MULT)
2764 if (((GET_CODE (XEXP (op0, 0)) == PLUS
2765 && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
2766 || (GET_CODE (XEXP (op0, 0)) == MINUS
2767 && XEXP (XEXP (op0, 0), 1) == const1_rtx))
2768 && rtx_equal_p (XEXP (op0, 1), op1))
2769 lhs = XEXP (XEXP (op0, 0), 0);
2770 else if (((GET_CODE (XEXP (op0, 1)) == PLUS
2771 && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
2772 || (GET_CODE (XEXP (op0, 1)) == MINUS
2773 && XEXP (XEXP (op0, 1), 1) == const1_rtx))
2774 && rtx_equal_p (XEXP (op0, 0), op1))
2775 lhs = XEXP (XEXP (op0, 1), 0);
2777 else if (GET_CODE (op1) == MULT)
2779 if (((GET_CODE (XEXP (op1, 0)) == PLUS
2780 && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
2781 || (GET_CODE (XEXP (op1, 0)) == MINUS
2782 && XEXP (XEXP (op1, 0), 1) == const1_rtx))
2783 && rtx_equal_p (XEXP (op1, 1), op0))
2784 rhs = XEXP (XEXP (op1, 0), 0);
2785 else if (((GET_CODE (XEXP (op1, 1)) == PLUS
2786 && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
2787 || (GET_CODE (XEXP (op1, 1)) == MINUS
2788 && XEXP (XEXP (op1, 1), 1) == const1_rtx))
2789 && rtx_equal_p (XEXP (op1, 0), op0))
2790 rhs = XEXP (XEXP (op1, 1), 0);
2792 if (lhs != op0 || rhs != op1)
2793 return simplify_gen_binary (MULT, int_mode, lhs, rhs);
2796 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2797 if (CONST_SCALAR_INT_P (op1)
2798 && GET_CODE (op0) == XOR
2799 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2800 && mode_signbit_p (mode, op1))
2801 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2802 simplify_gen_binary (XOR, mode, op1,
2803 XEXP (op0, 1)));
2805 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2806 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2807 && GET_CODE (op0) == MULT
2808 && GET_CODE (XEXP (op0, 0)) == NEG)
2810 rtx in1, in2;
2812 in1 = XEXP (XEXP (op0, 0), 0);
2813 in2 = XEXP (op0, 1);
2814 return simplify_gen_binary (MINUS, mode, op1,
2815 simplify_gen_binary (MULT, mode,
2816 in1, in2));
2819 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2820 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2821 is 1. */
2822 if (COMPARISON_P (op0)
2823 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2824 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2825 && (reversed = reversed_comparison (op0, mode)))
2826 return
2827 simplify_gen_unary (NEG, mode, reversed, mode);
2829 /* If one of the operands is a PLUS or a MINUS, see if we can
2830 simplify this by the associative law.
2831 Don't use the associative law for floating point.
2832 The inaccuracy makes it nonassociative,
2833 and subtle programs can break if operations are associated. */
2835 if (INTEGRAL_MODE_P (mode)
2836 && (plus_minus_operand_p (op0)
2837 || plus_minus_operand_p (op1))
2838 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2839 return tem;
2841 /* Reassociate floating point addition only when the user
2842 specifies associative math operations. */
2843 if (FLOAT_MODE_P (mode)
2844 && flag_associative_math)
2846 tem = simplify_associative_operation (code, mode, op0, op1);
2847 if (tem)
2848 return tem;
2851 /* Handle vector series. */
2852 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2854 tem = simplify_binary_operation_series (code, mode, op0, op1);
2855 if (tem)
2856 return tem;
2858 break;
2860 case COMPARE:
2861 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2862 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2863 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2864 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2866 rtx xop00 = XEXP (op0, 0);
2867 rtx xop10 = XEXP (op1, 0);
2869 if (REG_P (xop00) && REG_P (xop10)
2870 && REGNO (xop00) == REGNO (xop10)
2871 && GET_MODE (xop00) == mode
2872 && GET_MODE (xop10) == mode
2873 && GET_MODE_CLASS (mode) == MODE_CC)
2874 return xop00;
2876 break;
2878 case MINUS:
2879 /* We can't assume x-x is 0 even with non-IEEE floating point,
2880 but since it is zero except in very strange circumstances, we
2881 will treat it as zero with -ffinite-math-only. */
2882 if (rtx_equal_p (trueop0, trueop1)
2883 && ! side_effects_p (op0)
2884 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2885 return CONST0_RTX (mode);
2887 /* Change subtraction from zero into negation. (0 - x) is the
2888 same as -x when x is NaN, infinite, or finite and nonzero.
2889 But if the mode has signed zeros, and does not round towards
2890 -infinity, then 0 - 0 is 0, not -0. */
2891 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2892 return simplify_gen_unary (NEG, mode, op1, mode);
2894 /* (-1 - a) is ~a, unless the expression contains symbolic
2895 constants, in which case not retaining additions and
2896 subtractions could cause invalid assembly to be produced. */
2897 if (trueop0 == constm1_rtx
2898 && !contains_symbolic_reference_p (op1))
2899 return simplify_gen_unary (NOT, mode, op1, mode);
2901 /* Subtracting 0 has no effect unless the mode has signalling NaNs,
2902 or has signed zeros and supports rounding towards -infinity.
2903 In such a case, 0 - 0 is -0. */
2904 if (!(HONOR_SIGNED_ZEROS (mode)
2905 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2906 && !HONOR_SNANS (mode)
2907 && trueop1 == CONST0_RTX (mode))
2908 return op0;
2910 /* See if this is something like X * C - X or vice versa or
2911 if the multiplication is written as a shift. If so, we can
2912 distribute and make a new multiply, shift, or maybe just
2913 have X (if C is 2 in the example above). But don't make
2914 something more expensive than we had before. */
2916 if (is_a <scalar_int_mode> (mode, &int_mode))
2918 rtx lhs = op0, rhs = op1;
2920 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2921 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
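/* As for PLUS above, but track the negated coefficient of the
   second operand, so that LHS*COEFF0 + RHS*NEGCOEFF1 equals
   OP0 - OP1.  */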
2923 if (GET_CODE (lhs) == NEG)
2925 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2926 lhs = XEXP (lhs, 0);
2928 else if (GET_CODE (lhs) == MULT
2929 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2931 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2932 lhs = XEXP (lhs, 0);
2934 else if (GET_CODE (lhs) == ASHIFT
2935 && CONST_INT_P (XEXP (lhs, 1))
2936 && INTVAL (XEXP (lhs, 1)) >= 0
2937 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2939 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2940 GET_MODE_PRECISION (int_mode));
2941 lhs = XEXP (lhs, 0);
2944 if (GET_CODE (rhs) == NEG)
2946 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2947 rhs = XEXP (rhs, 0);
2949 else if (GET_CODE (rhs) == MULT
2950 && CONST_INT_P (XEXP (rhs, 1)))
2952 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2953 rhs = XEXP (rhs, 0);
2955 else if (GET_CODE (rhs) == ASHIFT
2956 && CONST_INT_P (XEXP (rhs, 1))
2957 && INTVAL (XEXP (rhs, 1)) >= 0
2958 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2960 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2961 GET_MODE_PRECISION (int_mode));
2962 negcoeff1 = -negcoeff1;
2963 rhs = XEXP (rhs, 0);
2966 if (rtx_equal_p (lhs, rhs))
2968 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2969 rtx coeff;
2970 bool speed = optimize_function_for_speed_p (cfun);
2972 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2974 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2975 return (set_src_cost (tem, int_mode, speed)
2976 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2979 /* Optimize (X + 1) * Y - Y to X * Y. */
2980 lhs = op0;
2981 if (GET_CODE (op0) == MULT)
2983 if (((GET_CODE (XEXP (op0, 0)) == PLUS
2984 && XEXP (XEXP (op0, 0), 1) == const1_rtx)
2985 || (GET_CODE (XEXP (op0, 0)) == MINUS
2986 && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
2987 && rtx_equal_p (XEXP (op0, 1), op1))
2988 lhs = XEXP (XEXP (op0, 0), 0);
2989 else if (((GET_CODE (XEXP (op0, 1)) == PLUS
2990 && XEXP (XEXP (op0, 1), 1) == const1_rtx)
2991 || (GET_CODE (XEXP (op0, 1)) == MINUS
2992 && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
2993 && rtx_equal_p (XEXP (op0, 0), op1))
2994 lhs = XEXP (XEXP (op0, 1), 0);
2996 if (lhs != op0)
2997 return simplify_gen_binary (MULT, int_mode, lhs, op1);
3000 /* (a - (-b)) -> (a + b). True even for IEEE. */
3001 if (GET_CODE (op1) == NEG)
3002 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3004 /* (-x - c) may be simplified as (-c - x). */
3005 if (GET_CODE (op0) == NEG
3006 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
3008 tem = simplify_unary_operation (NEG, mode, op1, mode);
3009 if (tem)
3010 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
3013 if ((GET_CODE (op0) == CONST
3014 || GET_CODE (op0) == SYMBOL_REF
3015 || GET_CODE (op0) == LABEL_REF)
3016 && poly_int_rtx_p (op1, &offset))
3017 return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
3019 /* Don't let a relocatable value get a negative coeff. */
3020 if (poly_int_rtx_p (op1) && GET_MODE (op0) != VOIDmode)
3021 return simplify_gen_binary (PLUS, mode,
3022 op0,
3023 neg_poly_int_rtx (mode, op1));
3025 /* (x - (x & y)) -> (x & ~y) */
3026 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
3028 if (rtx_equal_p (op0, XEXP (op1, 0)))
3030 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
3031 GET_MODE (XEXP (op1, 1)));
3032 return simplify_gen_binary (AND, mode, op0, tem);
3034 if (rtx_equal_p (op0, XEXP (op1, 1)))
3036 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
3037 GET_MODE (XEXP (op1, 0)));
3038 return simplify_gen_binary (AND, mode, op0, tem);
3042 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
3043 by reversing the comparison code if valid. */
3044 if (STORE_FLAG_VALUE == 1
3045 && trueop0 == const1_rtx
3046 && COMPARISON_P (op1)
3047 && (reversed = reversed_comparison (op1, mode)))
3048 return reversed;
3050 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
3051 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3052 && GET_CODE (op1) == MULT
3053 && GET_CODE (XEXP (op1, 0)) == NEG)
3055 rtx in1, in2;
3057 in1 = XEXP (XEXP (op1, 0), 0);
3058 in2 = XEXP (op1, 1);
3059 return simplify_gen_binary (PLUS, mode,
3060 simplify_gen_binary (MULT, mode,
3061 in1, in2),
3062 op0);
3065 /* Canonicalize (minus (neg A) (mult B C)) to
3066 (minus (mult (neg B) C) A). */
3067 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3068 && GET_CODE (op1) == MULT
3069 && GET_CODE (op0) == NEG)
3071 rtx in1, in2;
3073 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
3074 in2 = XEXP (op1, 1);
3075 return simplify_gen_binary (MINUS, mode,
3076 simplify_gen_binary (MULT, mode,
3077 in1, in2),
3078 XEXP (op0, 0));
3081 /* If one of the operands is a PLUS or a MINUS, see if we can
3082 simplify this by the associative law. This will, for example,
3083 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
3084 Don't use the associative law for floating point.
3085 The inaccuracy makes it nonassociative,
3086 and subtle programs can break if operations are associated. */
3088 if (INTEGRAL_MODE_P (mode)
3089 && (plus_minus_operand_p (op0)
3090 || plus_minus_operand_p (op1))
3091 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3092 return tem;
3094 /* Handle vector series. */
3095 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3097 tem = simplify_binary_operation_series (code, mode, op0, op1);
3098 if (tem)
3099 return tem;
3101 break;
3103 case MULT:
3104 if (trueop1 == constm1_rtx)
3105 return simplify_gen_unary (NEG, mode, op0, mode);
3107 if (GET_CODE (op0) == NEG)
3109 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
3110 /* If op1 is a MULT as well and simplify_unary_operation
3111 just moved the NEG to the second operand, simplify_gen_binary
3112 below could, through simplify_associative_operation, move
3113 the NEG around again and recurse endlessly. */
3114 if (temp
3115 && GET_CODE (op1) == MULT
3116 && GET_CODE (temp) == MULT
3117 && XEXP (op1, 0) == XEXP (temp, 0)
3118 && GET_CODE (XEXP (temp, 1)) == NEG
3119 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
3120 temp = NULL_RTX;
3121 if (temp)
3122 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
3124 if (GET_CODE (op1) == NEG)
3126 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
3127 /* If op0 is a MULT as well and simplify_unary_operation
3128 just moved the NEG to the second operand, simplify_gen_binary
3129 below could, through simplify_associative_operation, move
3130 the NEG around again and recurse endlessly. */
3131 if (temp
3132 && GET_CODE (op0) == MULT
3133 && GET_CODE (temp) == MULT
3134 && XEXP (op0, 0) == XEXP (temp, 0)
3135 && GET_CODE (XEXP (temp, 1)) == NEG
3136 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
3137 temp = NULL_RTX;
3138 if (temp)
3139 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
3142 /* Maybe simplify x * 0 to 0. The reduction is not valid if
3143 x is NaN, since x * 0 is then also NaN. Nor is it valid
3144 when the mode has signed zeros, since multiplying a negative
3145 number by 0 will give -0, not 0. */
3146 if (!HONOR_NANS (mode)
3147 && !HONOR_SIGNED_ZEROS (mode)
3148 && trueop1 == CONST0_RTX (mode)
3149 && ! side_effects_p (op0))
3150 return op1;
3152 /* In IEEE floating point, x*1 is not equivalent to x for
3153 signalling NaNs. */
3154 if (!HONOR_SNANS (mode)
3155 && trueop1 == CONST1_RTX (mode))
3156 return op0;
3158 /* Convert multiply by constant power of two into shift. */
3159 if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
3161 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
3162 if (val >= 0)
3163 return simplify_gen_binary (ASHIFT, mode, op0,
3164 gen_int_shift_amount (mode, val));
3167 /* x*2 is x+x and x*(-1) is -x */
3168 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3169 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
3170 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
3171 && GET_MODE (op0) == mode)
3173 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3175 if (real_equal (d1, &dconst2))
3176 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3178 if (!HONOR_SNANS (mode)
3179 && real_equal (d1, &dconstm1))
3180 return simplify_gen_unary (NEG, mode, op0, mode);
3183 /* Optimize -x * -x as x * x. */
3184 if (FLOAT_MODE_P (mode)
3185 && GET_CODE (op0) == NEG
3186 && GET_CODE (op1) == NEG
3187 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3188 && !side_effects_p (XEXP (op0, 0)))
3189 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3191 /* Likewise, optimize abs(x) * abs(x) as x * x. */
3192 if (SCALAR_FLOAT_MODE_P (mode)
3193 && GET_CODE (op0) == ABS
3194 && GET_CODE (op1) == ABS
3195 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3196 && !side_effects_p (XEXP (op0, 0)))
3197 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3199 /* Reassociate multiplication, but for floating point MULTs
3200 only when the user specifies unsafe math optimizations. */
3201 if (! FLOAT_MODE_P (mode)
3202 || flag_unsafe_math_optimizations)
3204 tem = simplify_associative_operation (code, mode, op0, op1);
3205 if (tem)
3206 return tem;
3208 break;
3210 case IOR:
3211 if (trueop1 == CONST0_RTX (mode))
3212 return op0;
3213 if (INTEGRAL_MODE_P (mode)
3214 && trueop1 == CONSTM1_RTX (mode)
3215 && !side_effects_p (op0))
3216 return op1;
3217 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3218 return op0;
3219 /* A | (~A) -> -1 */
3220 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3221 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3222 && ! side_effects_p (op0)
3223 && SCALAR_INT_MODE_P (mode))
3224 return constm1_rtx;
3226 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
3227 if (CONST_INT_P (op1)
3228 && HWI_COMPUTABLE_MODE_P (mode)
3229 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
3230 && !side_effects_p (op0))
3231 return op1;
3233 /* Canonicalize (X & C1) | C2. */
3234 if (GET_CODE (op0) == AND
3235 && CONST_INT_P (trueop1)
3236 && CONST_INT_P (XEXP (op0, 1)))
3238 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
3239 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
3240 HOST_WIDE_INT c2 = INTVAL (trueop1);
3242 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
3243 if ((c1 & c2) == c1
3244 && !side_effects_p (XEXP (op0, 0)))
3245 return trueop1;
3247 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
3248 if (((c1|c2) & mask) == mask)
3249 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
3252 /* Convert (A & B) | A to A. */
3253 if (GET_CODE (op0) == AND
3254 && (rtx_equal_p (XEXP (op0, 0), op1)
3255 || rtx_equal_p (XEXP (op0, 1), op1))
3256 && ! side_effects_p (XEXP (op0, 0))
3257 && ! side_effects_p (XEXP (op0, 1)))
3258 return op1;
3260 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3261 mode size to (rotate A CX). */
3263 if (GET_CODE (op1) == ASHIFT
3264 || GET_CODE (op1) == SUBREG)
3266 opleft = op1;
3267 opright = op0;
3269 else
3271 opright = op1;
3272 opleft = op0;
3275 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
3276 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
3277 && CONST_INT_P (XEXP (opleft, 1))
3278 && CONST_INT_P (XEXP (opright, 1))
3279 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
3280 == GET_MODE_UNIT_PRECISION (mode)))
3281 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
3283 /* Same, but for ashift that has been "simplified" to a wider mode
3284 by simplify_shift_const. */
3286 if (GET_CODE (opleft) == SUBREG
3287 && is_a <scalar_int_mode> (mode, &int_mode)
3288 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
3289 &inner_mode)
3290 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
3291 && GET_CODE (opright) == LSHIFTRT
3292 && GET_CODE (XEXP (opright, 0)) == SUBREG
3293 && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
3294 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
3295 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
3296 SUBREG_REG (XEXP (opright, 0)))
3297 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
3298 && CONST_INT_P (XEXP (opright, 1))
3299 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
3300 + INTVAL (XEXP (opright, 1))
3301 == GET_MODE_PRECISION (int_mode)))
3302 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3303 XEXP (SUBREG_REG (opleft), 1));
3305 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3306 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
3307 the PLUS does not affect any of the bits in OP1: then we can do
3308 the IOR as a PLUS and we can associate. This is valid if OP1
3309 can be safely shifted left C bits. */
3310 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3311 && GET_CODE (XEXP (op0, 0)) == PLUS
3312 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3313 && CONST_INT_P (XEXP (op0, 1))
3314 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3316 int count = INTVAL (XEXP (op0, 1));
3317 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3319 if (mask >> count == INTVAL (trueop1)
3320 && trunc_int_for_mode (mask, mode) == mask
3321 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3322 return simplify_gen_binary (ASHIFTRT, mode,
3323 plus_constant (mode, XEXP (op0, 0),
3324 mask),
3325 XEXP (op0, 1));
3328 /* The following happens with bitfield merging.
3329 (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3330 if (GET_CODE (op0) == AND
3331 && GET_CODE (op1) == AND
3332 && CONST_INT_P (XEXP (op0, 1))
3333 && CONST_INT_P (XEXP (op1, 1))
3334 && (INTVAL (XEXP (op0, 1))
3335 == ~INTVAL (XEXP (op1, 1))))
3337 /* The IOR may be on both sides. */
3338 rtx top0 = NULL_RTX, top1 = NULL_RTX;
3339 if (GET_CODE (XEXP (op1, 0)) == IOR)
3340 top0 = op0, top1 = op1;
3341 else if (GET_CODE (XEXP (op0, 0)) == IOR)
3342 top0 = op1, top1 = op0;
3343 if (top0 && top1)
3345 /* X may be on either side of the inner IOR. */
3346 rtx tem = NULL_RTX;
3347 if (rtx_equal_p (XEXP (top0, 0),
3348 XEXP (XEXP (top1, 0), 0)))
3349 tem = XEXP (XEXP (top1, 0), 1);
3350 else if (rtx_equal_p (XEXP (top0, 0),
3351 XEXP (XEXP (top1, 0), 1)))
3352 tem = XEXP (XEXP (top1, 0), 0);
3353 if (tem)
3354 return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3355 simplify_gen_binary
3356 (AND, mode, tem, XEXP (top1, 1)));
3360 /* Convert (ior (and A C) (and B C)) into (and (ior A B) C). */
3361 if (GET_CODE (op0) == GET_CODE (op1)
3362 && (GET_CODE (op0) == AND
3363 || GET_CODE (op0) == IOR
3364 || GET_CODE (op0) == LSHIFTRT
3365 || GET_CODE (op0) == ASHIFTRT
3366 || GET_CODE (op0) == ASHIFT
3367 || GET_CODE (op0) == ROTATE
3368 || GET_CODE (op0) == ROTATERT))
3370 tem = simplify_distributive_operation (code, mode, op0, op1);
3371 if (tem)
3372 return tem;
3375 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3376 if (tem)
3377 return tem;
3379 tem = simplify_associative_operation (code, mode, op0, op1);
3380 if (tem)
3381 return tem;
3383 tem = simplify_logical_relational_operation (code, mode, op0, op1);
3384 if (tem)
3385 return tem;
3386 break;
3388 case XOR:
3389 if (trueop1 == CONST0_RTX (mode))
3390 return op0;
3391 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3392 return simplify_gen_unary (NOT, mode, op0, mode);
3393 if (rtx_equal_p (trueop0, trueop1)
3394 && ! side_effects_p (op0)
3395 && GET_MODE_CLASS (mode) != MODE_CC)
3396 return CONST0_RTX (mode);
3398 /* Canonicalize XOR of the most significant bit to PLUS. */
3399 if (CONST_SCALAR_INT_P (op1)
3400 && mode_signbit_p (mode, op1))
3401 return simplify_gen_binary (PLUS, mode, op0, op1);
3402 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3403 if (CONST_SCALAR_INT_P (op1)
3404 && GET_CODE (op0) == PLUS
3405 && CONST_SCALAR_INT_P (XEXP (op0, 1))
3406 && mode_signbit_p (mode, XEXP (op0, 1)))
3407 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3408 simplify_gen_binary (XOR, mode, op1,
3409 XEXP (op0, 1)));
3411 /* If we are XORing two things that have no bits in common,
3412 convert them into an IOR. This helps to detect rotation encoded
3413 using those methods and possibly other simplifications. */
3415 if (HWI_COMPUTABLE_MODE_P (mode)
3416 && (nonzero_bits (op0, mode)
3417 & nonzero_bits (op1, mode)) == 0)
3418 return (simplify_gen_binary (IOR, mode, op0, op1));
3420 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3421 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3422 (NOT y). */
3424 int num_negated = 0;
3426 if (GET_CODE (op0) == NOT)
3427 num_negated++, op0 = XEXP (op0, 0);
3428 if (GET_CODE (op1) == NOT)
3429 num_negated++, op1 = XEXP (op1, 0);
3431 if (num_negated == 2)
3432 return simplify_gen_binary (XOR, mode, op0, op1);
3433 else if (num_negated == 1)
3434 return simplify_gen_unary (NOT, mode,
3435 simplify_gen_binary (XOR, mode, op0, op1),
3436 mode);
3439 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3440 correspond to a machine insn or result in further simplifications
3441 if B is a constant. */
3443 if (GET_CODE (op0) == AND
3444 && rtx_equal_p (XEXP (op0, 1), op1)
3445 && ! side_effects_p (op1))
3446 return simplify_gen_binary (AND, mode,
3447 simplify_gen_unary (NOT, mode,
3448 XEXP (op0, 0), mode),
3449 op1);
3451 else if (GET_CODE (op0) == AND
3452 && rtx_equal_p (XEXP (op0, 0), op1)
3453 && ! side_effects_p (op1))
3454 return simplify_gen_binary (AND, mode,
3455 simplify_gen_unary (NOT, mode,
3456 XEXP (op0, 1), mode),
3457 op1);
3459 /* Given (xor (ior (xor A B) C) D), where B, C and D are
3460 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3461 out bits inverted twice and not set by C. Similarly, given
3462 (xor (and (xor A B) C) D), simplify without inverting C in
3463 the xor operand: (xor (and A C) (B&C)^D).
3465 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3466 && GET_CODE (XEXP (op0, 0)) == XOR
3467 && CONST_INT_P (op1)
3468 && CONST_INT_P (XEXP (op0, 1))
3469 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3471 enum rtx_code op = GET_CODE (op0);
3472 rtx a = XEXP (XEXP (op0, 0), 0);
3473 rtx b = XEXP (XEXP (op0, 0), 1);
3474 rtx c = XEXP (op0, 1);
3475 rtx d = op1;
3476 HOST_WIDE_INT bval = INTVAL (b);
3477 HOST_WIDE_INT cval = INTVAL (c);
3478 HOST_WIDE_INT dval = INTVAL (d);
3479 HOST_WIDE_INT xcval;
3481 if (op == IOR)
3482 xcval = ~cval;
3483 else
3484 xcval = cval;
3486 return simplify_gen_binary (XOR, mode,
3487 simplify_gen_binary (op, mode, a, c),
3488 gen_int_mode ((bval & xcval) ^ dval,
3489 mode));
3492 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
3493 we can transform like this:
3494 (A&B)^C == ~(A&B)&C | ~C&(A&B)
3495 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
3496 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
3497 Attempt a few simplifications when B and C are both constants. */
3498 if (GET_CODE (op0) == AND
3499 && CONST_INT_P (op1)
3500 && CONST_INT_P (XEXP (op0, 1)))
3502 rtx a = XEXP (op0, 0);
3503 rtx b = XEXP (op0, 1);
3504 rtx c = op1;
3505 HOST_WIDE_INT bval = INTVAL (b);
3506 HOST_WIDE_INT cval = INTVAL (c);
3508 /* Instead of computing ~A&C, we compute its negated value,
3509 ~(A|~C). If it yields -1, ~A&C is zero, so we can
3510 optimize for sure. If it does not simplify, we still try
3511 to compute ~A&C below, but since that always allocates
3512 RTL, we don't try that before committing to returning a
3513 simplified expression. */
3514 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
3515 GEN_INT (~cval));
3517 if ((~cval & bval) == 0)
3519 rtx na_c = NULL_RTX;
3520 if (n_na_c)
3521 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
3522 else
3524 /* If ~A does not simplify, don't bother: we don't
3525 want to simplify 2 operations into 3, and if na_c
3526 were to simplify with na, n_na_c would have
3527 simplified as well. */
3528 rtx na = simplify_unary_operation (NOT, mode, a, mode);
3529 if (na)
3530 na_c = simplify_gen_binary (AND, mode, na, c);
3533 /* Try to simplify ~A&C | ~B&C. */
3534 if (na_c != NULL_RTX)
3535 return simplify_gen_binary (IOR, mode, na_c,
3536 gen_int_mode (~bval & cval, mode));
3538 else
3540 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3541 if (n_na_c == CONSTM1_RTX (mode))
3543 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3544 gen_int_mode (~cval & bval,
3545 mode));
3546 return simplify_gen_binary (IOR, mode, a_nc_b,
3547 gen_int_mode (~bval & cval,
3548 mode));
3553 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3554 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3555 machines, and also has shorter instruction path length. */
3556 if (GET_CODE (op0) == AND
3557 && GET_CODE (XEXP (op0, 0)) == XOR
3558 && CONST_INT_P (XEXP (op0, 1))
3559 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3561 rtx a = trueop1;
3562 rtx b = XEXP (XEXP (op0, 0), 1);
3563 rtx c = XEXP (op0, 1);
3564 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3565 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3566 rtx bc = simplify_gen_binary (AND, mode, b, c);
3567 return simplify_gen_binary (IOR, mode, a_nc, bc);
3569 /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
3570 else if (GET_CODE (op0) == AND
3571 && GET_CODE (XEXP (op0, 0)) == XOR
3572 && CONST_INT_P (XEXP (op0, 1))
3573 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3575 rtx a = XEXP (XEXP (op0, 0), 0);
3576 rtx b = trueop1;
3577 rtx c = XEXP (op0, 1);
3578 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3579 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3580 rtx ac = simplify_gen_binary (AND, mode, a, c);
3581 return simplify_gen_binary (IOR, mode, ac, b_nc);
3584 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3585 comparison if STORE_FLAG_VALUE is 1. */
3586 if (STORE_FLAG_VALUE == 1
3587 && trueop1 == const1_rtx
3588 && COMPARISON_P (op0)
3589 && (reversed = reversed_comparison (op0, mode)))
3590 return reversed;
3592 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3593 is (lt foo (const_int 0)), so we can perform the above
3594 simplification if STORE_FLAG_VALUE is 1. */
3596 if (is_a <scalar_int_mode> (mode, &int_mode)
3597 && STORE_FLAG_VALUE == 1
3598 && trueop1 == const1_rtx
3599 && GET_CODE (op0) == LSHIFTRT
3600 && CONST_INT_P (XEXP (op0, 1))
3601 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3602 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3604 /* (xor (comparison foo bar) (const_int sign-bit))
3605 when STORE_FLAG_VALUE is the sign bit. */
3606 if (is_a <scalar_int_mode> (mode, &int_mode)
3607 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3608 && trueop1 == const_true_rtx
3609 && COMPARISON_P (op0)
3610 && (reversed = reversed_comparison (op0, int_mode)))
3611 return reversed;
3613 /* Convert (xor (and A C) (and B C)) into (and (xor A B) C). */
3614 if (GET_CODE (op0) == GET_CODE (op1)
3615 && (GET_CODE (op0) == AND
3616 || GET_CODE (op0) == LSHIFTRT
3617 || GET_CODE (op0) == ASHIFTRT
3618 || GET_CODE (op0) == ASHIFT
3619 || GET_CODE (op0) == ROTATE
3620 || GET_CODE (op0) == ROTATERT))
3622 tem = simplify_distributive_operation (code, mode, op0, op1);
3623 if (tem)
3624 return tem;
3627 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3628 if (tem)
3629 return tem;
3631 tem = simplify_associative_operation (code, mode, op0, op1);
3632 if (tem)
3633 return tem;
3634 break;
3636 case AND:
3637 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3638 return trueop1;
3639 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3640 return op0;
3641 if (HWI_COMPUTABLE_MODE_P (mode))
3643 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3644 HOST_WIDE_INT nzop1;
3645 if (CONST_INT_P (trueop1))
3647 HOST_WIDE_INT val1 = INTVAL (trueop1);
3648 /* If we are turning off bits already known off in OP0, we need
3649 not do an AND. */
3650 if ((nzop0 & ~val1) == 0)
3651 return op0;
3653 nzop1 = nonzero_bits (trueop1, mode);
3654 /* If we are clearing all the nonzero bits, the result is zero. */
3655 if ((nzop1 & nzop0) == 0
3656 && !side_effects_p (op0) && !side_effects_p (op1))
3657 return CONST0_RTX (mode);
3659 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3660 && GET_MODE_CLASS (mode) != MODE_CC)
3661 return op0;
3662 /* A & (~A) -> 0 */
3663 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3664 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3665 && ! side_effects_p (op0)
3666 && GET_MODE_CLASS (mode) != MODE_CC)
3667 return CONST0_RTX (mode);
3669 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3670 there are no nonzero bits of C outside of X's mode. */
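      /* Illustrative instance (modes chosen for concreteness):
	 (and:SI (sign_extend:SI (reg:QI x)) (const_int 0x7f)) becomes
	 (zero_extend:SI (and:QI (reg:QI x) (const_int 0x7f))); since the
	 mask has no bits outside QImode, only QImode bits of the
	 extension survive, so ZERO_EXTEND is safe even if the original
	 extension was signed.  */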
3671 if ((GET_CODE (op0) == SIGN_EXTEND
3672 || GET_CODE (op0) == ZERO_EXTEND)
3673 && CONST_INT_P (trueop1)
3674 && HWI_COMPUTABLE_MODE_P (mode)
3675 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3676 & UINTVAL (trueop1)) == 0)
3678 machine_mode imode = GET_MODE (XEXP (op0, 0));
3679 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3680 gen_int_mode (INTVAL (trueop1),
3681 imode));
3682 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3685 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3686 we might be able to further simplify the AND with X and potentially
3687 remove the truncation altogether. */
3688 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3690 rtx x = XEXP (op0, 0);
3691 machine_mode xmode = GET_MODE (x);
3692 tem = simplify_gen_binary (AND, xmode, x,
3693 gen_int_mode (INTVAL (trueop1), xmode));
3694 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3697 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3698 if (GET_CODE (op0) == IOR
3699 && CONST_INT_P (trueop1)
3700 && CONST_INT_P (XEXP (op0, 1)))
3702 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3703 return simplify_gen_binary (IOR, mode,
3704 simplify_gen_binary (AND, mode,
3705 XEXP (op0, 0), op1),
3706 gen_int_mode (tmp, mode));
3709 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3710 insn (and may simplify more). */
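      /* Sketch of why this holds: (A ^ B) & A is ((A & ~B) | (~A & B)) & A,
	 and the second term vanishes because ~A & A == 0, leaving A & ~B,
	 which many targets implement as a single and-not instruction
	 (e.g. "andn" or "bic").  */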
3711 if (GET_CODE (op0) == XOR
3712 && rtx_equal_p (XEXP (op0, 0), op1)
3713 && ! side_effects_p (op1))
3714 return simplify_gen_binary (AND, mode,
3715 simplify_gen_unary (NOT, mode,
3716 XEXP (op0, 1), mode),
3717 op1);
3719 if (GET_CODE (op0) == XOR
3720 && rtx_equal_p (XEXP (op0, 1), op1)
3721 && ! side_effects_p (op1))
3722 return simplify_gen_binary (AND, mode,
3723 simplify_gen_unary (NOT, mode,
3724 XEXP (op0, 0), mode),
3725 op1);
3727 /* Similarly for (~(A ^ B)) & A. */
3728 if (GET_CODE (op0) == NOT
3729 && GET_CODE (XEXP (op0, 0)) == XOR
3730 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3731 && ! side_effects_p (op1))
3732 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3734 if (GET_CODE (op0) == NOT
3735 && GET_CODE (XEXP (op0, 0)) == XOR
3736 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3737 && ! side_effects_p (op1))
3738 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3740 /* Convert (A | B) & A to A. */
3741 if (GET_CODE (op0) == IOR
3742 && (rtx_equal_p (XEXP (op0, 0), op1)
3743 || rtx_equal_p (XEXP (op0, 1), op1))
3744 && ! side_effects_p (XEXP (op0, 0))
3745 && ! side_effects_p (XEXP (op0, 1)))
3746 return op1;
3748 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3749 ((A & N) + B) & M -> (A + B) & M
3750 Similarly if (N & M) == 0,
3751 ((A | N) + B) & M -> (A + B) & M
3752 and for - instead of + and/or ^ instead of |.
3753 Also, if (N & M) == 0, then
3754 (A +- N) & M -> A & M. */
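      /* Worked instance (constants chosen for concreteness): with
	 M = 0xff and N = 0x1ff we have (N & M) == M, so
	 ((A & 0x1ff) + B) & 0xff can become (A + B) & 0xff: the bits the
	 inner AND may clear all lie above the mask, and carries only
	 propagate upward, so they cannot affect the masked low byte of
	 the sum.  */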
3755 if (CONST_INT_P (trueop1)
3756 && HWI_COMPUTABLE_MODE_P (mode)
3757 && ~UINTVAL (trueop1)
3758 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3759 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3761 rtx pmop[2];
3762 int which;
3764 pmop[0] = XEXP (op0, 0);
3765 pmop[1] = XEXP (op0, 1);
3767 if (CONST_INT_P (pmop[1])
3768 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3769 return simplify_gen_binary (AND, mode, pmop[0], op1);
3771 for (which = 0; which < 2; which++)
3773 tem = pmop[which];
3774 switch (GET_CODE (tem))
3776 case AND:
3777 if (CONST_INT_P (XEXP (tem, 1))
3778 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3779 == UINTVAL (trueop1))
3780 pmop[which] = XEXP (tem, 0);
3781 break;
3782 case IOR:
3783 case XOR:
3784 if (CONST_INT_P (XEXP (tem, 1))
3785 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3786 pmop[which] = XEXP (tem, 0);
3787 break;
3788 default:
3789 break;
3793 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3795 tem = simplify_gen_binary (GET_CODE (op0), mode,
3796 pmop[0], pmop[1]);
3797 return simplify_gen_binary (code, mode, tem, op1);
 3801 /* (and X (ior (not X) Y)) -> (and X Y) */
3802 if (GET_CODE (op1) == IOR
3803 && GET_CODE (XEXP (op1, 0)) == NOT
3804 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3805 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3807 /* (and (ior (not X) Y) X) -> (and X Y) */
3808 if (GET_CODE (op0) == IOR
3809 && GET_CODE (XEXP (op0, 0)) == NOT
3810 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3811 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
 3813 /* (and X (ior Y (not X))) -> (and X Y) */
3814 if (GET_CODE (op1) == IOR
3815 && GET_CODE (XEXP (op1, 1)) == NOT
3816 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3817 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3819 /* (and (ior Y (not X)) X) -> (and X Y) */
3820 if (GET_CODE (op0) == IOR
3821 && GET_CODE (XEXP (op0, 1)) == NOT
3822 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3823 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3825 /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C). */
3826 if (GET_CODE (op0) == GET_CODE (op1)
3827 && (GET_CODE (op0) == AND
3828 || GET_CODE (op0) == IOR
3829 || GET_CODE (op0) == LSHIFTRT
3830 || GET_CODE (op0) == ASHIFTRT
3831 || GET_CODE (op0) == ASHIFT
3832 || GET_CODE (op0) == ROTATE
3833 || GET_CODE (op0) == ROTATERT))
3835 tem = simplify_distributive_operation (code, mode, op0, op1);
3836 if (tem)
3837 return tem;
3840 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3841 if (tem)
3842 return tem;
3844 tem = simplify_associative_operation (code, mode, op0, op1);
3845 if (tem)
3846 return tem;
3847 break;
3849 case UDIV:
3850 /* 0/x is 0 (or x&0 if x has side-effects). */
3851 if (trueop0 == CONST0_RTX (mode)
3852 && !cfun->can_throw_non_call_exceptions)
3854 if (side_effects_p (op1))
3855 return simplify_gen_binary (AND, mode, op1, trueop0);
3856 return trueop0;
3858 /* x/1 is x. */
3859 if (trueop1 == CONST1_RTX (mode))
3861 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3862 if (tem)
3863 return tem;
3865 /* Convert divide by power of two into shift. */
3866 if (CONST_INT_P (trueop1)
3867 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3868 return simplify_gen_binary (LSHIFTRT, mode, op0,
3869 gen_int_shift_amount (mode, val));
3870 break;
3872 case DIV:
3873 /* Handle floating point and integers separately. */
3874 if (SCALAR_FLOAT_MODE_P (mode))
3876 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3877 safe for modes with NaNs, since 0.0 / 0.0 will then be
3878 NaN rather than 0.0. Nor is it safe for modes with signed
3879 zeros, since dividing 0 by a negative number gives -0.0 */
3880 if (trueop0 == CONST0_RTX (mode)
3881 && !HONOR_NANS (mode)
3882 && !HONOR_SIGNED_ZEROS (mode)
3883 && ! side_effects_p (op1))
3884 return op0;
3885 /* x/1.0 is x. */
3886 if (trueop1 == CONST1_RTX (mode)
3887 && !HONOR_SNANS (mode))
3888 return op0;
3890 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3891 && trueop1 != CONST0_RTX (mode))
3893 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3895 /* x/-1.0 is -x. */
3896 if (real_equal (d1, &dconstm1)
3897 && !HONOR_SNANS (mode))
3898 return simplify_gen_unary (NEG, mode, op0, mode);
3900 /* Change FP division by a constant into multiplication.
3901 Only do this with -freciprocal-math. */
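	    /* For instance (value chosen for concreteness), x / 4.0 is
	       rewritten below as x * 0.25; in general the reciprocal may
	       round, which is why the transform is gated on
	       -freciprocal-math.  */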
3902 if (flag_reciprocal_math
3903 && !real_equal (d1, &dconst0))
3905 REAL_VALUE_TYPE d;
3906 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3907 tem = const_double_from_real_value (d, mode);
3908 return simplify_gen_binary (MULT, mode, op0, tem);
3912 else if (SCALAR_INT_MODE_P (mode))
3914 /* 0/x is 0 (or x&0 if x has side-effects). */
3915 if (trueop0 == CONST0_RTX (mode)
3916 && !cfun->can_throw_non_call_exceptions)
3918 if (side_effects_p (op1))
3919 return simplify_gen_binary (AND, mode, op1, trueop0);
3920 return trueop0;
3922 /* x/1 is x. */
3923 if (trueop1 == CONST1_RTX (mode))
3925 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3926 if (tem)
3927 return tem;
3929 /* x/-1 is -x. */
3930 if (trueop1 == constm1_rtx)
3932 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3933 if (x)
3934 return simplify_gen_unary (NEG, mode, x, mode);
3937 break;
3939 case UMOD:
3940 /* 0%x is 0 (or x&0 if x has side-effects). */
3941 if (trueop0 == CONST0_RTX (mode))
3943 if (side_effects_p (op1))
3944 return simplify_gen_binary (AND, mode, op1, trueop0);
3945 return trueop0;
 3947 /* x%1 is 0 (or x&0 if x has side-effects). */
3948 if (trueop1 == CONST1_RTX (mode))
3950 if (side_effects_p (op0))
3951 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3952 return CONST0_RTX (mode);
3954 /* Implement modulus by power of two as AND. */
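      /* For instance (value chosen for concreteness), (umod:SI x
	 (const_int 8)) becomes (and:SI x (const_int 7)), since
	 x % 8 == x & (8 - 1) for unsigned x.  */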
3955 if (CONST_INT_P (trueop1)
3956 && exact_log2 (UINTVAL (trueop1)) > 0)
3957 return simplify_gen_binary (AND, mode, op0,
3958 gen_int_mode (UINTVAL (trueop1) - 1,
3959 mode));
3960 break;
3962 case MOD:
3963 /* 0%x is 0 (or x&0 if x has side-effects). */
3964 if (trueop0 == CONST0_RTX (mode))
3966 if (side_effects_p (op1))
3967 return simplify_gen_binary (AND, mode, op1, trueop0);
3968 return trueop0;
 3970 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3971 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3973 if (side_effects_p (op0))
3974 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3975 return CONST0_RTX (mode);
3977 break;
3979 case ROTATERT:
3980 case ROTATE:
3981 if (trueop1 == CONST0_RTX (mode))
3982 return op0;
3983 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
 3984 prefer left rotation; if op1 is from bitsize / 2 + 1 to
 3985 bitsize - 1, use the other direction of rotate with a 1 .. bitsize / 2 - 1
 3986 amount instead. */
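      /* Illustrative instances (SImode chosen for concreteness):
	 (rotate x 31) is rewritten below as (rotatert x 1), and
	 (rotatert x 20) as (rotate x 12), keeping constant rotate counts
	 at or below half the precision.  */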
3987 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
3988 if (CONST_INT_P (trueop1)
3989 && IN_RANGE (INTVAL (trueop1),
3990 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3991 GET_MODE_UNIT_PRECISION (mode) - 1))
3993 int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
3994 rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
3995 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3996 mode, op0, new_amount_rtx);
3998 #endif
3999 /* FALLTHRU */
4000 case ASHIFTRT:
4001 if (trueop1 == CONST0_RTX (mode))
4002 return op0;
4003 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4004 return op0;
4005 /* Rotating ~0 always results in ~0. */
4006 if (CONST_INT_P (trueop0)
4007 && HWI_COMPUTABLE_MODE_P (mode)
4008 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4009 && ! side_effects_p (op1))
4010 return op0;
4012 canonicalize_shift:
4013 /* Given:
4014 scalar modes M1, M2
4015 scalar constants c1, c2
4016 size (M2) > size (M1)
4017 c1 == size (M2) - size (M1)
4018 optimize:
4019 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4020 <low_part>)
4021 (const_int <c2>))
4023 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4024 <low_part>). */
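      /* Concrete instance (modes chosen for concreteness): with
	 M2 = DImode, M1 = SImode and c1 = 32,
	 (lshiftrt:SI (subreg:SI (lshiftrt:DI r (const_int 32)) 0)
	 (const_int 3)) becomes (subreg:SI (lshiftrt:DI r (const_int 35)) 0),
	 folding the two right shifts into a single DImode shift.  */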
4025 if ((code == ASHIFTRT || code == LSHIFTRT)
4026 && is_a <scalar_int_mode> (mode, &int_mode)
4027 && SUBREG_P (op0)
4028 && CONST_INT_P (op1)
4029 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
4030 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
4031 &inner_mode)
4032 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
4033 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
4034 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
4035 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
4036 && subreg_lowpart_p (op0))
4038 rtx tmp = gen_int_shift_amount
4039 (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
 4041 /* Combine would usually zero out the value when combining two
 4042 local shifts whose total count is larger than or equal to the mode.
 4043 However, since we fold away one of the shifts here, combine won't
 4044 see it, so we should immediately zero the result if it's out of
 4045 range. */
4046 if (code == LSHIFTRT
4047 && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
4048 tmp = const0_rtx;
4049 else
4050 tmp = simplify_gen_binary (code,
4051 inner_mode,
4052 XEXP (SUBREG_REG (op0), 0),
4053 tmp);
4055 return lowpart_subreg (int_mode, tmp, inner_mode);
4058 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
4060 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
4061 if (val != INTVAL (op1))
4062 return simplify_gen_binary (code, mode, op0,
4063 gen_int_shift_amount (mode, val));
4065 break;
4067 case SS_ASHIFT:
4068 if (CONST_INT_P (trueop0)
4069 && HWI_COMPUTABLE_MODE_P (mode)
4070 && (UINTVAL (trueop0) == (GET_MODE_MASK (mode) >> 1)
4071 || mode_signbit_p (mode, trueop0))
4072 && ! side_effects_p (op1))
4073 return op0;
4074 goto simplify_ashift;
4076 case US_ASHIFT:
4077 if (CONST_INT_P (trueop0)
4078 && HWI_COMPUTABLE_MODE_P (mode)
4079 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4080 && ! side_effects_p (op1))
4081 return op0;
4082 /* FALLTHRU */
4084 case ASHIFT:
4085 simplify_ashift:
4086 if (trueop1 == CONST0_RTX (mode))
4087 return op0;
4088 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4089 return op0;
4090 if (mem_depth
4091 && code == ASHIFT
4092 && CONST_INT_P (trueop1)
4093 && is_a <scalar_int_mode> (mode, &int_mode)
4094 && IN_RANGE (UINTVAL (trueop1),
4095 1, GET_MODE_PRECISION (int_mode) - 1))
4097 auto c = (wi::one (GET_MODE_PRECISION (int_mode))
4098 << UINTVAL (trueop1));
4099 rtx new_op1 = immed_wide_int_const (c, int_mode);
4100 return simplify_gen_binary (MULT, int_mode, op0, new_op1);
4102 goto canonicalize_shift;
4104 case LSHIFTRT:
4105 if (trueop1 == CONST0_RTX (mode))
4106 return op0;
4107 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4108 return op0;
4109 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
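      /* Reasoning sketch (SImode chosen for concreteness): for nonzero X,
	 (clz X) is at most 31, so bit 5 is clear; only the CLZ-at-zero
	 value (32 when CLZ_DEFINED_VALUE_AT_ZERO yields the precision)
	 has bit 5 set.  Hence (lshiftrt (clz X) 5) is 1 exactly when
	 X == 0, matching (eq X 0) under STORE_FLAG_VALUE == 1.  */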
4110 if (GET_CODE (op0) == CLZ
4111 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
4112 && CONST_INT_P (trueop1)
4113 && STORE_FLAG_VALUE == 1
4114 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
4116 unsigned HOST_WIDE_INT zero_val = 0;
4118 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
4119 && zero_val == GET_MODE_PRECISION (inner_mode)
4120 && INTVAL (trueop1) == exact_log2 (zero_val))
4121 return simplify_gen_relational (EQ, mode, inner_mode,
4122 XEXP (op0, 0), const0_rtx);
4124 goto canonicalize_shift;
4126 case SMIN:
4127 if (HWI_COMPUTABLE_MODE_P (mode)
4128 && mode_signbit_p (mode, trueop1)
4129 && ! side_effects_p (op0))
4130 return op1;
4131 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4132 return op0;
4133 tem = simplify_associative_operation (code, mode, op0, op1);
4134 if (tem)
4135 return tem;
4136 break;
4138 case SMAX:
4139 if (HWI_COMPUTABLE_MODE_P (mode)
4140 && CONST_INT_P (trueop1)
4141 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
4142 && ! side_effects_p (op0))
4143 return op1;
4144 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4145 return op0;
4146 tem = simplify_associative_operation (code, mode, op0, op1);
4147 if (tem)
4148 return tem;
4149 break;
4151 case UMIN:
4152 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
4153 return op1;
4154 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4155 return op0;
4156 tem = simplify_associative_operation (code, mode, op0, op1);
4157 if (tem)
4158 return tem;
4159 break;
4161 case UMAX:
4162 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
4163 return op1;
4164 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4165 return op0;
4166 tem = simplify_associative_operation (code, mode, op0, op1);
4167 if (tem)
4168 return tem;
4169 break;
4171 case SS_PLUS:
4172 case US_PLUS:
4173 case SS_MINUS:
4174 case US_MINUS:
4175 /* Simplify x +/- 0 to x, if possible. */
4176 if (trueop1 == CONST0_RTX (mode))
4177 return op0;
4178 return 0;
4180 case SS_MULT:
4181 case US_MULT:
4182 /* Simplify x * 0 to 0, if possible. */
4183 if (trueop1 == CONST0_RTX (mode)
4184 && !side_effects_p (op0))
4185 return op1;
4187 /* Simplify x * 1 to x, if possible. */
4188 if (trueop1 == CONST1_RTX (mode))
4189 return op0;
4190 return 0;
4192 case SMUL_HIGHPART:
4193 case UMUL_HIGHPART:
4194 /* Simplify x * 0 to 0, if possible. */
4195 if (trueop1 == CONST0_RTX (mode)
4196 && !side_effects_p (op0))
4197 return op1;
4198 return 0;
4200 case SS_DIV:
4201 case US_DIV:
4202 /* Simplify x / 1 to x, if possible. */
4203 if (trueop1 == CONST1_RTX (mode))
4204 return op0;
4205 return 0;
4207 case VEC_SERIES:
4208 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
4209 return gen_vec_duplicate (mode, op0);
4210 if (valid_for_const_vector_p (mode, op0)
4211 && valid_for_const_vector_p (mode, op1))
4212 return gen_const_vec_series (mode, op0, op1);
4213 return 0;
4215 case VEC_SELECT:
4216 if (!VECTOR_MODE_P (mode))
4218 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4219 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
4220 gcc_assert (GET_CODE (trueop1) == PARALLEL);
4221 gcc_assert (XVECLEN (trueop1, 0) == 1);
4223 /* We can't reason about selections made at runtime. */
4224 if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4225 return 0;
4227 if (vec_duplicate_p (trueop0, &elt0))
4228 return elt0;
4230 if (GET_CODE (trueop0) == CONST_VECTOR)
4231 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
4232 (trueop1, 0, 0)));
4234 /* Extract a scalar element from a nested VEC_SELECT expression
4235 (with optional nested VEC_CONCAT expression). Some targets
 4236 (i386) extract a scalar element from a vector using a chain of
 4237 nested VEC_SELECT expressions. When the input operand is a memory
 4238 operand, this operation can be simplified to a simple scalar
 4239 load from an offset memory address. */
4240 int n_elts;
4241 if (GET_CODE (trueop0) == VEC_SELECT
4242 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4243 .is_constant (&n_elts)))
4245 rtx op0 = XEXP (trueop0, 0);
4246 rtx op1 = XEXP (trueop0, 1);
4248 int i = INTVAL (XVECEXP (trueop1, 0, 0));
4249 int elem;
4251 rtvec vec;
4252 rtx tmp_op, tmp;
4254 gcc_assert (GET_CODE (op1) == PARALLEL);
4255 gcc_assert (i < n_elts);
 4257 /* Select the element pointed to by the nested selector. */
4258 elem = INTVAL (XVECEXP (op1, 0, i));
4260 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
4261 if (GET_CODE (op0) == VEC_CONCAT)
4263 rtx op00 = XEXP (op0, 0);
4264 rtx op01 = XEXP (op0, 1);
4266 machine_mode mode00, mode01;
4267 int n_elts00, n_elts01;
4269 mode00 = GET_MODE (op00);
4270 mode01 = GET_MODE (op01);
4272 /* Find out the number of elements of each operand.
4273 Since the concatenated result has a constant number
4274 of elements, the operands must too. */
4275 n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
4276 n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
4278 gcc_assert (n_elts == n_elts00 + n_elts01);
4280 /* Select correct operand of VEC_CONCAT
4281 and adjust selector. */
4282 if (elem < n_elts01)
4283 tmp_op = op00;
4284 else
4286 tmp_op = op01;
4287 elem -= n_elts00;
4290 else
4291 tmp_op = op0;
4293 vec = rtvec_alloc (1);
4294 RTVEC_ELT (vec, 0) = GEN_INT (elem);
4296 tmp = gen_rtx_fmt_ee (code, mode,
4297 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
4298 return tmp;
4301 else
4303 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
4304 gcc_assert (GET_MODE_INNER (mode)
4305 == GET_MODE_INNER (GET_MODE (trueop0)));
4306 gcc_assert (GET_CODE (trueop1) == PARALLEL);
4308 if (vec_duplicate_p (trueop0, &elt0))
4309 /* It doesn't matter which elements are selected by trueop1,
4310 because they are all the same. */
4311 return gen_vec_duplicate (mode, elt0);
4313 if (GET_CODE (trueop0) == CONST_VECTOR)
4315 unsigned n_elts = XVECLEN (trueop1, 0);
4316 rtvec v = rtvec_alloc (n_elts);
4317 unsigned int i;
4319 gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
4320 for (i = 0; i < n_elts; i++)
4322 rtx x = XVECEXP (trueop1, 0, i);
4324 if (!CONST_INT_P (x))
4325 return 0;
4327 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
4328 INTVAL (x));
4331 return gen_rtx_CONST_VECTOR (mode, v);
4334 /* Recognize the identity. */
4335 if (GET_MODE (trueop0) == mode)
4337 bool maybe_ident = true;
4338 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4340 rtx j = XVECEXP (trueop1, 0, i);
4341 if (!CONST_INT_P (j) || INTVAL (j) != i)
4343 maybe_ident = false;
4344 break;
4347 if (maybe_ident)
4348 return trueop0;
4351 /* If we select a low-part subreg, return that. */
4352 if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
4354 rtx new_rtx = lowpart_subreg (mode, trueop0,
4355 GET_MODE (trueop0));
4356 if (new_rtx != NULL_RTX)
4357 return new_rtx;
4360 /* If we build {a,b} then permute it, build the result directly. */
4361 if (XVECLEN (trueop1, 0) == 2
4362 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4363 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4364 && GET_CODE (trueop0) == VEC_CONCAT
4365 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
4366 && GET_MODE (XEXP (trueop0, 0)) == mode
4367 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
4368 && GET_MODE (XEXP (trueop0, 1)) == mode)
4370 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4371 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4372 rtx subop0, subop1;
4374 gcc_assert (i0 < 4 && i1 < 4);
4375 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
4376 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
4378 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4381 if (XVECLEN (trueop1, 0) == 2
4382 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4383 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
4384 && GET_CODE (trueop0) == VEC_CONCAT
4385 && GET_MODE (trueop0) == mode)
4387 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4388 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
4389 rtx subop0, subop1;
4391 gcc_assert (i0 < 2 && i1 < 2);
4392 subop0 = XEXP (trueop0, i0);
4393 subop1 = XEXP (trueop0, i1);
4395 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
4398 /* If we select one half of a vec_concat, return that. */
4399 int l0, l1;
4400 if (GET_CODE (trueop0) == VEC_CONCAT
4401 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
4402 .is_constant (&l0))
4403 && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
4404 .is_constant (&l1))
4405 && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
4407 rtx subop0 = XEXP (trueop0, 0);
4408 rtx subop1 = XEXP (trueop0, 1);
4409 machine_mode mode0 = GET_MODE (subop0);
4410 machine_mode mode1 = GET_MODE (subop1);
4411 int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
4412 if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
4414 bool success = true;
4415 for (int i = 1; i < l0; ++i)
4417 rtx j = XVECEXP (trueop1, 0, i);
4418 if (!CONST_INT_P (j) || INTVAL (j) != i)
4420 success = false;
4421 break;
4424 if (success)
4425 return subop0;
4427 if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
4429 bool success = true;
4430 for (int i = 1; i < l1; ++i)
4432 rtx j = XVECEXP (trueop1, 0, i);
4433 if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
4435 success = false;
4436 break;
4439 if (success)
4440 return subop1;
4444 /* Simplify vec_select of a subreg of X to just a vec_select of X
4445 when X has same component mode as vec_select. */
4446 unsigned HOST_WIDE_INT subreg_offset = 0;
4447 if (GET_CODE (trueop0) == SUBREG
4448 && GET_MODE_INNER (mode)
4449 == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
4450 && GET_MODE_NUNITS (mode).is_constant (&l1)
4451 && constant_multiple_p (subreg_memory_offset (trueop0),
4452 GET_MODE_UNIT_BITSIZE (mode),
4453 &subreg_offset))
4455 poly_uint64 nunits
4456 = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
4457 bool success = true;
4458 for (int i = 0; i != l1; i++)
4460 rtx idx = XVECEXP (trueop1, 0, i);
4461 if (!CONST_INT_P (idx)
4462 || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
4464 success = false;
4465 break;
4469 if (success)
4471 rtx par = trueop1;
4472 if (subreg_offset)
4474 rtvec vec = rtvec_alloc (l1);
4475 for (int i = 0; i < l1; i++)
4476 RTVEC_ELT (vec, i)
4477 = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
4478 + subreg_offset);
4479 par = gen_rtx_PARALLEL (VOIDmode, vec);
4481 return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
4486 if (XVECLEN (trueop1, 0) == 1
4487 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
4488 && GET_CODE (trueop0) == VEC_CONCAT)
4490 rtx vec = trueop0;
4491 offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
4493 /* Try to find the element in the VEC_CONCAT. */
4494 while (GET_MODE (vec) != mode
4495 && GET_CODE (vec) == VEC_CONCAT)
4497 poly_int64 vec_size;
4499 if (CONST_INT_P (XEXP (vec, 0)))
4501 /* vec_concat of two const_ints doesn't make sense with
4502 respect to modes. */
4503 if (CONST_INT_P (XEXP (vec, 1)))
4504 return 0;
4506 vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
4507 - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
4509 else
4510 vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
4512 if (known_lt (offset, vec_size))
4513 vec = XEXP (vec, 0);
4514 else if (known_ge (offset, vec_size))
4516 offset -= vec_size;
4517 vec = XEXP (vec, 1);
4519 else
4520 break;
4521 vec = avoid_constant_pool_reference (vec);
4524 if (GET_MODE (vec) == mode)
4525 return vec;
4528 /* If we select elements in a vec_merge that all come from the same
4529 operand, select from that operand directly. */
4530 if (GET_CODE (op0) == VEC_MERGE)
4532 rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
4533 if (CONST_INT_P (trueop02))
4535 unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
4536 bool all_operand0 = true;
4537 bool all_operand1 = true;
4538 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
4540 rtx j = XVECEXP (trueop1, 0, i);
4541 if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
4542 all_operand1 = false;
4543 else
4544 all_operand0 = false;
4546 if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
4547 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
4548 if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
4549 return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
4553 /* If we have two nested selects that are inverses of each
4554 other, replace them with the source operand. */
4555 if (GET_CODE (trueop0) == VEC_SELECT
4556 && GET_MODE (XEXP (trueop0, 0)) == mode)
4558 rtx op0_subop1 = XEXP (trueop0, 1);
4559 gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
4560 gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
4562 /* Apply the outer ordering vector to the inner one. (The inner
4563 ordering vector is expressly permitted to be of a different
4564 length than the outer one.) If the result is { 0, 1, ..., n-1 }
4565 then the two VEC_SELECTs cancel. */
4566 for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
4568 rtx x = XVECEXP (trueop1, 0, i);
4569 if (!CONST_INT_P (x))
4570 return 0;
4571 rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
4572 if (!CONST_INT_P (y) || i != INTVAL (y))
4573 return 0;
4575 return XEXP (trueop0, 0);
4578 return 0;
4579 case VEC_CONCAT:
4581 machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
4582 ? GET_MODE (trueop0)
4583 : GET_MODE_INNER (mode));
4584 machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
4585 ? GET_MODE (trueop1)
4586 : GET_MODE_INNER (mode));
4588 gcc_assert (VECTOR_MODE_P (mode));
4589 gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
4590 + GET_MODE_SIZE (op1_mode),
4591 GET_MODE_SIZE (mode)));
4593 if (VECTOR_MODE_P (op0_mode))
4594 gcc_assert (GET_MODE_INNER (mode)
4595 == GET_MODE_INNER (op0_mode));
4596 else
4597 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
4599 if (VECTOR_MODE_P (op1_mode))
4600 gcc_assert (GET_MODE_INNER (mode)
4601 == GET_MODE_INNER (op1_mode));
4602 else
4603 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
4605 unsigned int n_elts, in_n_elts;
4606 if ((GET_CODE (trueop0) == CONST_VECTOR
4607 || CONST_SCALAR_INT_P (trueop0)
4608 || CONST_DOUBLE_AS_FLOAT_P (trueop0))
4609 && (GET_CODE (trueop1) == CONST_VECTOR
4610 || CONST_SCALAR_INT_P (trueop1)
4611 || CONST_DOUBLE_AS_FLOAT_P (trueop1))
4612 && GET_MODE_NUNITS (mode).is_constant (&n_elts)
4613 && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
4615 rtvec v = rtvec_alloc (n_elts);
4616 unsigned int i;
4617 for (i = 0; i < n_elts; i++)
4619 if (i < in_n_elts)
4621 if (!VECTOR_MODE_P (op0_mode))
4622 RTVEC_ELT (v, i) = trueop0;
4623 else
4624 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
4626 else
4628 if (!VECTOR_MODE_P (op1_mode))
4629 RTVEC_ELT (v, i) = trueop1;
4630 else
4631 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
4632 i - in_n_elts);
4636 return gen_rtx_CONST_VECTOR (mode, v);
4639 /* Try to merge two VEC_SELECTs from the same vector into a single one.
4640 Restrict the transformation to avoid generating a VEC_SELECT with a
4641 mode unrelated to its operand. */
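	/* Illustrative instance (modes chosen for concreteness):
	   (vec_concat:V4SI (vec_select:V2SI x (parallel [0 1]))
	   (vec_select:V2SI x (parallel [2 3])))
	   becomes (vec_select:V4SI x (parallel [0 1 2 3]))
	   when x is itself a V4SI value.  */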
4642 if (GET_CODE (trueop0) == VEC_SELECT
4643 && GET_CODE (trueop1) == VEC_SELECT
4644 && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
4645 && GET_MODE_INNER (GET_MODE (XEXP (trueop0, 0)))
4646 == GET_MODE_INNER(mode))
4648 rtx par0 = XEXP (trueop0, 1);
4649 rtx par1 = XEXP (trueop1, 1);
4650 int len0 = XVECLEN (par0, 0);
4651 int len1 = XVECLEN (par1, 0);
4652 rtvec vec = rtvec_alloc (len0 + len1);
4653 for (int i = 0; i < len0; i++)
4654 RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
4655 for (int i = 0; i < len1; i++)
4656 RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
4657 return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
4658 gen_rtx_PARALLEL (VOIDmode, vec));
4661 return 0;
4663 default:
4664 gcc_unreachable ();
4667 if (mode == GET_MODE (op0)
4668 && mode == GET_MODE (op1)
4669 && vec_duplicate_p (op0, &elt0)
4670 && vec_duplicate_p (op1, &elt1))
4672 /* Try applying the operator to ELT and see if that simplifies.
4673 We can duplicate the result if so.
4675 The reason we don't use simplify_gen_binary is that it isn't
4676 necessarily a win to convert things like:
4678 (plus:V (vec_duplicate:V (reg:S R1))
4679 (vec_duplicate:V (reg:S R2)))
 4683 to (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
4685 The first might be done entirely in vector registers while the
4686 second might need a move between register files. */
4687 tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
4688 elt0, elt1);
4689 if (tem)
4690 return gen_vec_duplicate (mode, tem);
4693 return 0;
4696 /* Return true if binary operation OP distributes over addition in operand
4697 OPNO, with the other operand being held constant. OPNO counts from 1. */
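/* A brief instance: ASHIFT distributes over addition only in its first
   operand, (a + b) << c == (a << c) + (b << c) in modular arithmetic,
   whereas x << (a + b) is generally not (x << a) + (x << b); hence the
   "opno == 1" case below.  */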
4699 static bool
4700 distributes_over_addition_p (rtx_code op, int opno)
4702 switch (op)
4704 case PLUS:
4705 case MINUS:
4706 case MULT:
4707 return true;
4709 case ASHIFT:
4710 return opno == 1;
4712 default:
4713 return false;
4718 simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
4719 rtx op0, rtx op1)
4721 if (VECTOR_MODE_P (mode)
4722 && code != VEC_CONCAT
4723 && GET_CODE (op0) == CONST_VECTOR
4724 && GET_CODE (op1) == CONST_VECTOR)
4726 bool step_ok_p;
4727 if (CONST_VECTOR_STEPPED_P (op0)
4728 && CONST_VECTOR_STEPPED_P (op1))
4729 /* We can operate directly on the encoding if:
4731 a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
4732 implies
4733 (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
4735 Addition and subtraction are the supported operators
4736 for which this is true. */
4737 step_ok_p = (code == PLUS || code == MINUS);
4738 else if (CONST_VECTOR_STEPPED_P (op0))
4739 /* We can operate directly on stepped encodings if:
4741 a3 - a2 == a2 - a1
4742 implies:
4743 (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
4745 which is true if (x -> x op c) distributes over addition. */
4746 step_ok_p = distributes_over_addition_p (code, 1);
4747 else
4748 /* Similarly in reverse. */
4749 step_ok_p = distributes_over_addition_p (code, 2);
4750 rtx_vector_builder builder;
4751 if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
4752 return 0;
4754 unsigned int count = builder.encoded_nelts ();
4755 for (unsigned int i = 0; i < count; i++)
4757 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
4758 CONST_VECTOR_ELT (op0, i),
4759 CONST_VECTOR_ELT (op1, i));
4760 if (!x || !valid_for_const_vector_p (mode, x))
4761 return 0;
4762 builder.quick_push (x);
4764 return builder.build ();
4767 if (VECTOR_MODE_P (mode)
4768 && code == VEC_CONCAT
4769 && (CONST_SCALAR_INT_P (op0)
4770 || CONST_FIXED_P (op0)
4771 || CONST_DOUBLE_AS_FLOAT_P (op0))
4772 && (CONST_SCALAR_INT_P (op1)
4773 || CONST_DOUBLE_AS_FLOAT_P (op1)
4774 || CONST_FIXED_P (op1)))
4776 /* Both inputs have a constant number of elements, so the result
4777 must too. */
4778 unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
4779 rtvec v = rtvec_alloc (n_elts);
4781 gcc_assert (n_elts >= 2);
4782 if (n_elts == 2)
4784 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
4785 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
4787 RTVEC_ELT (v, 0) = op0;
4788 RTVEC_ELT (v, 1) = op1;
4790 else
4792 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
4793 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
4794 unsigned i;
4796 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
4797 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
4798 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
4800 for (i = 0; i < op0_n_elts; ++i)
4801 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
4802 for (i = 0; i < op1_n_elts; ++i)
4803 RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
4806 return gen_rtx_CONST_VECTOR (mode, v);
4809 if (SCALAR_FLOAT_MODE_P (mode)
4810 && CONST_DOUBLE_AS_FLOAT_P (op0)
4811 && CONST_DOUBLE_AS_FLOAT_P (op1)
4812 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
4814 if (code == AND
4815 || code == IOR
4816 || code == XOR)
4818 long tmp0[4];
4819 long tmp1[4];
4820 REAL_VALUE_TYPE r;
4821 int i;
4823 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
4824 GET_MODE (op0));
4825 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
4826 GET_MODE (op1));
4827 for (i = 0; i < 4; i++)
4829 switch (code)
4831 case AND:
4832 tmp0[i] &= tmp1[i];
4833 break;
4834 case IOR:
4835 tmp0[i] |= tmp1[i];
4836 break;
4837 case XOR:
4838 tmp0[i] ^= tmp1[i];
4839 break;
4840 default:
4841 gcc_unreachable ();
4844 real_from_target (&r, tmp0, mode);
4845 return const_double_from_real_value (r, mode);
4847 else
4849 REAL_VALUE_TYPE f0, f1, value, result;
4850 const REAL_VALUE_TYPE *opr0, *opr1;
4851 bool inexact;
4853 opr0 = CONST_DOUBLE_REAL_VALUE (op0);
4854 opr1 = CONST_DOUBLE_REAL_VALUE (op1);
4856 if (HONOR_SNANS (mode)
4857 && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
4858 || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
4859 return 0;
4861 real_convert (&f0, mode, opr0);
4862 real_convert (&f1, mode, opr1);
4864 if (code == DIV
4865 && real_equal (&f1, &dconst0)
4866 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
4867 return 0;
4869 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4870 && flag_trapping_math
4871 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
4873 int s0 = REAL_VALUE_NEGATIVE (f0);
4874 int s1 = REAL_VALUE_NEGATIVE (f1);
4876 switch (code)
4878 case PLUS:
4879 /* Inf + -Inf = NaN plus exception. */
4880 if (s0 != s1)
4881 return 0;
4882 break;
4883 case MINUS:
4884 /* Inf - Inf = NaN plus exception. */
4885 if (s0 == s1)
4886 return 0;
4887 break;
4888 case DIV:
4889 /* Inf / Inf = NaN plus exception. */
4890 return 0;
4891 default:
4892 break;
4896 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
4897 && flag_trapping_math
4898 && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
4899 || (REAL_VALUE_ISINF (f1)
4900 && real_equal (&f0, &dconst0))))
4901 /* Inf * 0 = NaN plus exception. */
4902 return 0;
4904 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
4905 &f0, &f1);
4906 real_convert (&result, mode, &value);
4908 /* Don't constant fold this floating point operation if
4909 the result has overflowed and flag_trapping_math. */
4911 if (flag_trapping_math
4912 && MODE_HAS_INFINITIES (mode)
4913 && REAL_VALUE_ISINF (result)
4914 && !REAL_VALUE_ISINF (f0)
4915 && !REAL_VALUE_ISINF (f1))
4916 /* Overflow plus exception. */
4917 return 0;
4919 /* Don't constant fold this floating point operation if the
 4920 result may depend upon the run-time rounding mode and
4921 flag_rounding_math is set, or if GCC's software emulation
4922 is unable to accurately represent the result. */
4924 if ((flag_rounding_math
4925 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
4926 && (inexact || !real_identical (&result, &value)))
4927 return NULL_RTX;
4929 return const_double_from_real_value (result, mode);
4933 /* We can fold some multi-word operations. */
4934 scalar_int_mode int_mode;
4935 if (is_a <scalar_int_mode> (mode, &int_mode)
4936 && CONST_SCALAR_INT_P (op0)
4937 && CONST_SCALAR_INT_P (op1)
4938 && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
4940 wide_int result;
4941 wi::overflow_type overflow;
4942 rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
4943 rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
4945 #if TARGET_SUPPORTS_WIDE_INT == 0
4946 /* This assert keeps the simplification from producing a result
4947 that cannot be represented in a CONST_DOUBLE but a lot of
4948 upstream callers expect that this function never fails to
 4949 simplify something, and so if you added this to the test
4950 above the code would die later anyway. If this assert
4951 happens, you just need to make the port support wide int. */
4952 gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
4953 #endif
4954 switch (code)
4956 case MINUS:
4957 result = wi::sub (pop0, pop1);
4958 break;
4960 case PLUS:
4961 result = wi::add (pop0, pop1);
4962 break;
4964 case MULT:
4965 result = wi::mul (pop0, pop1);
4966 break;
4968 case DIV:
4969 result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
4970 if (overflow)
4971 return NULL_RTX;
4972 break;
4974 case MOD:
4975 result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
4976 if (overflow)
4977 return NULL_RTX;
4978 break;
4980 case UDIV:
4981 result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
4982 if (overflow)
4983 return NULL_RTX;
4984 break;
4986 case UMOD:
4987 result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
4988 if (overflow)
4989 return NULL_RTX;
4990 break;
4992 case AND:
4993 result = wi::bit_and (pop0, pop1);
4994 break;
4996 case IOR:
4997 result = wi::bit_or (pop0, pop1);
4998 break;
5000 case XOR:
5001 result = wi::bit_xor (pop0, pop1);
5002 break;
5004 case SMIN:
5005 result = wi::smin (pop0, pop1);
5006 break;
5008 case SMAX:
5009 result = wi::smax (pop0, pop1);
5010 break;
5012 case UMIN:
5013 result = wi::umin (pop0, pop1);
5014 break;
5016 case UMAX:
5017 result = wi::umax (pop0, pop1);
5018 break;
5020 case LSHIFTRT:
5021 case ASHIFTRT:
5022 case ASHIFT:
5023 case SS_ASHIFT:
5024 case US_ASHIFT:
5026 wide_int wop1 = pop1;
5027 if (SHIFT_COUNT_TRUNCATED)
5028 wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
5029 else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
5030 return NULL_RTX;
5032 switch (code)
5034 case LSHIFTRT:
5035 result = wi::lrshift (pop0, wop1);
5036 break;
5038 case ASHIFTRT:
5039 result = wi::arshift (pop0, wop1);
5040 break;
5042 case ASHIFT:
5043 result = wi::lshift (pop0, wop1);
5044 break;
5046 case SS_ASHIFT:
5047 if (wi::leu_p (wop1, wi::clrsb (pop0)))
5048 result = wi::lshift (pop0, wop1);
5049 else if (wi::neg_p (pop0))
5050 result = wi::min_value (int_mode, SIGNED);
5051 else
5052 result = wi::max_value (int_mode, SIGNED);
5053 break;
5055 case US_ASHIFT:
5056 if (wi::eq_p (pop0, 0))
5057 result = pop0;
5058 else if (wi::leu_p (wop1, wi::clz (pop0)))
5059 result = wi::lshift (pop0, wop1);
5060 else
5061 result = wi::max_value (int_mode, UNSIGNED);
5062 break;
5064 default:
5065 gcc_unreachable ();
5067 break;
5069 case ROTATE:
5070 case ROTATERT:
5072 if (wi::neg_p (pop1))
5073 return NULL_RTX;
5075 switch (code)
5077 case ROTATE:
5078 result = wi::lrotate (pop0, pop1);
5079 break;
5081 case ROTATERT:
5082 result = wi::rrotate (pop0, pop1);
5083 break;
5085 default:
5086 gcc_unreachable ();
5088 break;
5091 case SS_PLUS:
5092 result = wi::add (pop0, pop1, SIGNED, &overflow);
5093 clamp_signed_saturation:
5094 if (overflow == wi::OVF_OVERFLOW)
5095 result = wi::max_value (GET_MODE_PRECISION (int_mode), SIGNED);
5096 else if (overflow == wi::OVF_UNDERFLOW)
5097 result = wi::min_value (GET_MODE_PRECISION (int_mode), SIGNED);
5098 else if (overflow != wi::OVF_NONE)
5099 return NULL_RTX;
5100 break;
5102 case US_PLUS:
5103 result = wi::add (pop0, pop1, UNSIGNED, &overflow);
5104 clamp_unsigned_saturation:
5105 if (overflow != wi::OVF_NONE)
5106 result = wi::max_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5107 break;
5109 case SS_MINUS:
5110 result = wi::sub (pop0, pop1, SIGNED, &overflow);
5111 goto clamp_signed_saturation;
5113 case US_MINUS:
5114 result = wi::sub (pop0, pop1, UNSIGNED, &overflow);
5115 if (overflow != wi::OVF_NONE)
5116 result = wi::min_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5117 break;
5119 case SS_MULT:
5120 result = wi::mul (pop0, pop1, SIGNED, &overflow);
5121 goto clamp_signed_saturation;
5123 case US_MULT:
5124 result = wi::mul (pop0, pop1, UNSIGNED, &overflow);
5125 goto clamp_unsigned_saturation;
5127 case SMUL_HIGHPART:
5128 result = wi::mul_high (pop0, pop1, SIGNED);
5129 break;
5131 case UMUL_HIGHPART:
5132 result = wi::mul_high (pop0, pop1, UNSIGNED);
5133 break;
5135 default:
5136 return NULL_RTX;
5138 return immed_wide_int_const (result, int_mode);
5141 /* Handle polynomial integers. */
5142 if (NUM_POLY_INT_COEFFS > 1
5143 && is_a <scalar_int_mode> (mode, &int_mode)
5144 && poly_int_rtx_p (op0)
5145 && poly_int_rtx_p (op1))
5147 poly_wide_int result;
5148 switch (code)
5150 case PLUS:
5151 result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
5152 break;
5154 case MINUS:
5155 result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
5156 break;
5158 case MULT:
5159 if (CONST_SCALAR_INT_P (op1))
5160 result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
5161 else
5162 return NULL_RTX;
5163 break;
5165 case ASHIFT:
5166 if (CONST_SCALAR_INT_P (op1))
5168 wide_int shift = rtx_mode_t (op1, mode);
5169 if (SHIFT_COUNT_TRUNCATED)
5170 shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
5171 else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
5172 return NULL_RTX;
5173 result = wi::to_poly_wide (op0, mode) << shift;
5175 else
5176 return NULL_RTX;
5177 break;
5179 case IOR:
5180 if (!CONST_SCALAR_INT_P (op1)
5181 || !can_ior_p (wi::to_poly_wide (op0, mode),
5182 rtx_mode_t (op1, mode), &result))
5183 return NULL_RTX;
5184 break;
5186 default:
5187 return NULL_RTX;
5189 return immed_wide_int_const (result, int_mode);
5192 return NULL_RTX;
5197 /* Return a positive integer if X should sort after Y. The value
5198 returned is 1 if and only if X and Y are both regs. */
5200 static int
5201 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
5203 int result;
5205 result = (commutative_operand_precedence (y)
5206 - commutative_operand_precedence (x));
5207 if (result)
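    /* Doubling keeps a precedence-based result even, so it can never be
       1; the caller reserves 1 for the REG-vs-REG ordering below, which
       it does not count as a canonicalization.  */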
5208 return result + result;
5210 /* Group together equal REGs to do more simplification. */
5211 if (REG_P (x) && REG_P (y))
5212 return REGNO (x) > REGNO (y);
5214 return 0;
5217 /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
5218 operands may be another PLUS or MINUS.
 5220 Rather than test for specific cases, we do this by a brute-force method
5221 and do all possible simplifications until no more changes occur. Then
5222 we rebuild the operation.
5224 May return NULL_RTX when no changes were made. */
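/* Illustrative walk-through (operands chosen for concreteness): for
   (minus (plus A B) (plus A C)) the inputs are flattened into the
   entries A, B, -A, -C; the pairwise pass cancels A against -A, and the
   remaining terms are rebuilt as (minus B C).  */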
5227 simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
5228 rtx op0, rtx op1)
5230 struct simplify_plus_minus_op_data
5232 rtx op;
5233 short neg;
5234 } ops[16];
5235 rtx result, tem;
5236 int n_ops = 2;
5237 int changed, n_constants, canonicalized = 0;
5238 int i, j;
5240 memset (ops, 0, sizeof ops);
5242 /* Set up the two operands and then expand them until nothing has been
5243 changed. If we run out of room in our array, give up; this should
5244 almost never happen. */
5246 ops[0].op = op0;
5247 ops[0].neg = 0;
5248 ops[1].op = op1;
5249 ops[1].neg = (code == MINUS);
5253 changed = 0;
5254 n_constants = 0;
5256 for (i = 0; i < n_ops; i++)
5258 rtx this_op = ops[i].op;
5259 int this_neg = ops[i].neg;
5260 enum rtx_code this_code = GET_CODE (this_op);
5262 switch (this_code)
5264 case PLUS:
5265 case MINUS:
5266 if (n_ops == ARRAY_SIZE (ops))
5267 return NULL_RTX;
5269 ops[n_ops].op = XEXP (this_op, 1);
5270 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
5271 n_ops++;
5273 ops[i].op = XEXP (this_op, 0);
5274 changed = 1;
5275 /* If this operand was negated then we will potentially
5276 canonicalize the expression. Similarly if we don't
5277 place the operands adjacent we're re-ordering the
5278 expression and thus might be performing a
5279 canonicalization. Ignore register re-ordering.
5280 ??? It might be better to shuffle the ops array here,
5281 but then (plus (plus (A, B), plus (C, D))) wouldn't
5282 be seen as non-canonical. */
5283 if (this_neg
5284 || (i != n_ops - 2
5285 && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
5286 canonicalized = 1;
5287 break;
5289 case NEG:
5290 ops[i].op = XEXP (this_op, 0);
5291 ops[i].neg = ! this_neg;
5292 changed = 1;
5293 canonicalized = 1;
5294 break;
5296 case CONST:
5297 if (n_ops != ARRAY_SIZE (ops)
5298 && GET_CODE (XEXP (this_op, 0)) == PLUS
5299 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
5300 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
5302 ops[i].op = XEXP (XEXP (this_op, 0), 0);
5303 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
5304 ops[n_ops].neg = this_neg;
5305 n_ops++;
5306 changed = 1;
5307 canonicalized = 1;
5309 break;
5311 case NOT:
5312 /* ~a -> (-a - 1) */
5313 if (n_ops != ARRAY_SIZE (ops))
5315 ops[n_ops].op = CONSTM1_RTX (mode);
5316 ops[n_ops++].neg = this_neg;
5317 ops[i].op = XEXP (this_op, 0);
5318 ops[i].neg = !this_neg;
5319 changed = 1;
5320 canonicalized = 1;
5322 break;
5324 CASE_CONST_SCALAR_INT:
5325 case CONST_POLY_INT:
5326 n_constants++;
5327 if (this_neg)
5329 ops[i].op = neg_poly_int_rtx (mode, this_op);
5330 ops[i].neg = 0;
5331 changed = 1;
5332 canonicalized = 1;
5334 break;
5336 default:
5337 break;
5341 while (changed);
5343 if (n_constants > 1)
5344 canonicalized = 1;
5346 gcc_assert (n_ops >= 2);
5348 /* If we only have two operands, we can avoid the loops. */
5349 if (n_ops == 2)
5351 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
5352 rtx lhs, rhs;
5354 /* Get the two operands. Be careful with the order, especially for
5355 the cases where code == MINUS. */
5356 if (ops[0].neg && ops[1].neg)
5358 lhs = gen_rtx_NEG (mode, ops[0].op);
5359 rhs = ops[1].op;
5361 else if (ops[0].neg)
5363 lhs = ops[1].op;
5364 rhs = ops[0].op;
5366 else
5368 lhs = ops[0].op;
5369 rhs = ops[1].op;
5372 return simplify_const_binary_operation (code, mode, lhs, rhs);
5375 /* Now simplify each pair of operands until nothing changes. */
5376 while (1)
5378 /* Insertion sort is good enough for a small array. */
5379 for (i = 1; i < n_ops; i++)
5381 struct simplify_plus_minus_op_data save;
5382 int cmp;
5384 j = i - 1;
5385 cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
5386 if (cmp <= 0)
5387 continue;
5388 /* Just swapping registers doesn't count as canonicalization. */
5389 if (cmp != 1)
5390 canonicalized = 1;
5392 save = ops[i];
5394 ops[j + 1] = ops[j];
5395 while (j--
5396 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
5397 ops[j + 1] = save;
5400 changed = 0;
5401 for (i = n_ops - 1; i > 0; i--)
5402 for (j = i - 1; j >= 0; j--)
5404 rtx lhs = ops[j].op, rhs = ops[i].op;
5405 int lneg = ops[j].neg, rneg = ops[i].neg;
5407 if (lhs != 0 && rhs != 0)
5409 enum rtx_code ncode = PLUS;
5411 if (lneg != rneg)
5413 ncode = MINUS;
5414 if (lneg)
5415 std::swap (lhs, rhs);
5417 else if (swap_commutative_operands_p (lhs, rhs))
5418 std::swap (lhs, rhs);
5420 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
5421 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
5423 rtx tem_lhs, tem_rhs;
5425 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
5426 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
5427 tem = simplify_binary_operation (ncode, mode, tem_lhs,
5428 tem_rhs);
5430 if (tem && !CONSTANT_P (tem))
5431 tem = gen_rtx_CONST (GET_MODE (tem), tem);
5433 else
5434 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
5436 if (tem)
5438 /* Reject "simplifications" that just wrap the two
5439 arguments in a CONST. Failure to do so can result
5440 in infinite recursion with simplify_binary_operation
5441 when it calls us to simplify CONST operations.
5442 Also, if we find such a simplification, don't try
5443 any more combinations with this rhs: We must have
 5444 something like symbol+offset, i.e. one of the
5445 trivial CONST expressions we handle later. */
5446 if (GET_CODE (tem) == CONST
5447 && GET_CODE (XEXP (tem, 0)) == ncode
5448 && XEXP (XEXP (tem, 0), 0) == lhs
5449 && XEXP (XEXP (tem, 0), 1) == rhs)
5450 break;
5451 lneg &= rneg;
5452 if (GET_CODE (tem) == NEG)
5453 tem = XEXP (tem, 0), lneg = !lneg;
5454 if (poly_int_rtx_p (tem) && lneg)
5455 tem = neg_poly_int_rtx (mode, tem), lneg = 0;
5457 ops[i].op = tem;
5458 ops[i].neg = lneg;
5459 ops[j].op = NULL_RTX;
5460 changed = 1;
5461 canonicalized = 1;
5466 if (!changed)
5467 break;
5469 /* Pack all the operands to the lower-numbered entries. */
5470 for (i = 0, j = 0; j < n_ops; j++)
5471 if (ops[j].op)
5473 ops[i] = ops[j];
5474 i++;
5476 n_ops = i;
5479 /* If nothing changed, check that rematerialization of rtl instructions
5480 is still required. */
5481 if (!canonicalized)
 5483 /* Perform rematerialization only if all operands are registers and
 5484 all operations are PLUS. */
5485 /* ??? Also disallow (non-global, non-frame) fixed registers to work
5486 around rs6000 and how it uses the CA register. See PR67145. */
5487 for (i = 0; i < n_ops; i++)
5488 if (ops[i].neg
5489 || !REG_P (ops[i].op)
5490 || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
5491 && fixed_regs[REGNO (ops[i].op)]
5492 && !global_regs[REGNO (ops[i].op)]
5493 && ops[i].op != frame_pointer_rtx
5494 && ops[i].op != arg_pointer_rtx
5495 && ops[i].op != stack_pointer_rtx))
5496 return NULL_RTX;
5497 goto gen_result;
5500 /* Create (minus -C X) instead of (neg (const (plus X C))). */
5501 if (n_ops == 2
5502 && CONST_INT_P (ops[1].op)
5503 && CONSTANT_P (ops[0].op)
5504 && ops[0].neg)
5505 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
5507 /* We suppressed creation of trivial CONST expressions in the
5508 combination loop to avoid recursion. Create one manually now.
5509 The combination loop should have ensured that there is exactly
5510 one CONST_INT, and the sort will have ensured that it is last
5511 in the array and that any other constant will be next-to-last. */
5513 if (n_ops > 1
5514 && poly_int_rtx_p (ops[n_ops - 1].op)
5515 && CONSTANT_P (ops[n_ops - 2].op))
5517 rtx value = ops[n_ops - 1].op;
5518 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
5519 value = neg_poly_int_rtx (mode, value);
5520 if (CONST_INT_P (value))
5522 ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
5523 INTVAL (value));
5524 n_ops--;
5528 /* Put a non-negated operand first, if possible. */
5530 for (i = 0; i < n_ops && ops[i].neg; i++)
5531 continue;
5532 if (i == n_ops)
5533 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
5534 else if (i != 0)
5536 tem = ops[0].op;
5537 ops[0] = ops[i];
5538 ops[i].op = tem;
5539 ops[i].neg = 1;
5542 /* Now make the result by performing the requested operations. */
5543 gen_result:
5544 result = ops[0].op;
5545 for (i = 1; i < n_ops; i++)
5546 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
5547 mode, result, ops[i].op);
5549 return result;
5552 /* Check whether an operand is suitable for calling simplify_plus_minus. */
5553 static bool
5554 plus_minus_operand_p (const_rtx x)
5556 return GET_CODE (x) == PLUS
5557 || GET_CODE (x) == MINUS
5558 || (GET_CODE (x) == CONST
5559 && GET_CODE (XEXP (x, 0)) == PLUS
5560 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
5561 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
5564 /* Like simplify_binary_operation except used for relational operators.
 5565 MODE is the mode of the result. If MODE is VOIDmode, the operands must
 5566 not both be VOIDmode as well.
 5568 CMP_MODE specifies the mode in which the comparison is done, so it is
5569 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
5570 the operands or, if both are VOIDmode, the operands are compared in
5571 "infinite precision". */
5573 simplify_context::simplify_relational_operation (rtx_code code,
5574 machine_mode mode,
5575 machine_mode cmp_mode,
5576 rtx op0, rtx op1)
5578 rtx tem, trueop0, trueop1;
5580 if (cmp_mode == VOIDmode)
5581 cmp_mode = GET_MODE (op0);
5582 if (cmp_mode == VOIDmode)
5583 cmp_mode = GET_MODE (op1);
5585 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
5586 if (tem)
5587 return relational_result (mode, cmp_mode, tem);
5589 /* For the following tests, ensure const0_rtx is op1. */
5590 if (swap_commutative_operands_p (op0, op1)
5591 || (op0 == const0_rtx && op1 != const0_rtx))
5592 std::swap (op0, op1), code = swap_condition (code);
5594 /* If op0 is a compare, extract the comparison arguments from it. */
5595 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5596 return simplify_gen_relational (code, mode, VOIDmode,
5597 XEXP (op0, 0), XEXP (op0, 1));
5599 if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
5600 return NULL_RTX;
5602 trueop0 = avoid_constant_pool_reference (op0);
5603 trueop1 = avoid_constant_pool_reference (op1);
5604 return simplify_relational_operation_1 (code, mode, cmp_mode,
5605 trueop0, trueop1);
5608 /* This part of simplify_relational_operation is only used when CMP_MODE
5609 is not in class MODE_CC (i.e. it is a real comparison).
 5611 MODE is the mode of the result, while CMP_MODE specifies the mode in
 5612 which the comparison is done, so it is the mode of the operands. */
5615 simplify_context::simplify_relational_operation_1 (rtx_code code,
5616 machine_mode mode,
5617 machine_mode cmp_mode,
5618 rtx op0, rtx op1)
5620 enum rtx_code op0code = GET_CODE (op0);
5622 if (op1 == const0_rtx && COMPARISON_P (op0))
5624 /* If op0 is a comparison, extract the comparison arguments
5625 from it. */
5626 if (code == NE)
5628 if (GET_MODE (op0) == mode)
5629 return simplify_rtx (op0);
5630 else
5631 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
5632 XEXP (op0, 0), XEXP (op0, 1));
5634 else if (code == EQ)
5636 enum rtx_code new_code = reversed_comparison_code (op0, NULL);
5637 if (new_code != UNKNOWN)
5638 return simplify_gen_relational (new_code, mode, VOIDmode,
5639 XEXP (op0, 0), XEXP (op0, 1));
5643 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
5644 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
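  /* Concrete case (constant chosen for concreteness):
     (ltu:SI (plus:SI a (const_int 5)) (const_int 5)) is true exactly
     when the addition wraps, i.e. when a >=u -5 as an unsigned value,
     so it is rewritten as (geu:SI a (const_int -5)).  */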
5645 if ((code == LTU || code == GEU)
5646 && GET_CODE (op0) == PLUS
5647 && CONST_INT_P (XEXP (op0, 1))
5648 && (rtx_equal_p (op1, XEXP (op0, 0))
5649 || rtx_equal_p (op1, XEXP (op0, 1)))
5650 /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
5651 && XEXP (op0, 1) != const0_rtx)
5653 rtx new_cmp
5654 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5655 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
5656 cmp_mode, XEXP (op0, 0), new_cmp);
5659 /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
5660 transformed into (LTU a -C). */
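/* For example, in QImode (gtu (plus a 4) 3) is true exactly when A + 4 does
   not wrap around, i.e. when A <= 251 unsigned, which is (ltu a -4). */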
5661 if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
5662 && CONST_INT_P (XEXP (op0, 1))
5663 && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
5664 && XEXP (op0, 1) != const0_rtx)
5666 rtx new_cmp
5667 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
5668 return simplify_gen_relational (LTU, mode, cmp_mode,
5669 XEXP (op0, 0), new_cmp);
5672 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
5673 if ((code == LTU || code == GEU)
5674 && GET_CODE (op0) == PLUS
5675 && rtx_equal_p (op1, XEXP (op0, 1))
5676 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
5677 && !rtx_equal_p (op1, XEXP (op0, 0)))
5678 return simplify_gen_relational (code, mode, cmp_mode, op0,
5679 copy_rtx (XEXP (op0, 0)));
5681 if (op1 == const0_rtx)
5683 /* Canonicalize (GTU x 0) as (NE x 0). */
5684 if (code == GTU)
5685 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
5686 /* Canonicalize (LEU x 0) as (EQ x 0). */
5687 if (code == LEU)
5688 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
5690 else if (op1 == const1_rtx)
5692 switch (code)
5694 case GE:
5695 /* Canonicalize (GE x 1) as (GT x 0). */
5696 return simplify_gen_relational (GT, mode, cmp_mode,
5697 op0, const0_rtx);
5698 case GEU:
5699 /* Canonicalize (GEU x 1) as (NE x 0). */
5700 return simplify_gen_relational (NE, mode, cmp_mode,
5701 op0, const0_rtx);
5702 case LT:
5703 /* Canonicalize (LT x 1) as (LE x 0). */
5704 return simplify_gen_relational (LE, mode, cmp_mode,
5705 op0, const0_rtx);
5706 case LTU:
5707 /* Canonicalize (LTU x 1) as (EQ x 0). */
5708 return simplify_gen_relational (EQ, mode, cmp_mode,
5709 op0, const0_rtx);
5710 default:
5711 break;
5714 else if (op1 == constm1_rtx)
5716 /* Canonicalize (LE x -1) as (LT x 0). */
5717 if (code == LE)
5718 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
5719 /* Canonicalize (GT x -1) as (GE x 0). */
5720 if (code == GT)
5721 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
5724 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
5725 if ((code == EQ || code == NE)
5726 && (op0code == PLUS || op0code == MINUS)
5727 && CONSTANT_P (op1)
5728 && CONSTANT_P (XEXP (op0, 1))
5729 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
5731 rtx x = XEXP (op0, 0);
5732 rtx c = XEXP (op0, 1);
5733 enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
5734 rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
5736 /* Detect an infinite recursive condition, where we oscillate at this
5737 simplification case between:
5738 A + B == C <---> C - B == A,
5739 where A, B, and C are all constants with non-simplifiable expressions,
5740 usually SYMBOL_REFs. */
5741 if (GET_CODE (tem) == invcode
5742 && CONSTANT_P (x)
5743 && rtx_equal_p (c, XEXP (tem, 1)))
5744 return NULL_RTX;
5746 return simplify_gen_relational (code, mode, cmp_mode, x, tem);
5749 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
5750 the same as (zero_extract:SI FOO (const_int 1) BAR). */
5751 scalar_int_mode int_mode, int_cmp_mode;
5752 if (code == NE
5753 && op1 == const0_rtx
5754 && is_int_mode (mode, &int_mode)
5755 && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
5756 /* ??? Work-around BImode bugs in the ia64 backend. */
5757 && int_mode != BImode
5758 && int_cmp_mode != BImode
5759 && nonzero_bits (op0, int_cmp_mode) == 1
5760 && STORE_FLAG_VALUE == 1)
5761 return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
5762 ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
5763 : lowpart_subreg (int_mode, op0, int_cmp_mode);
5765 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
5766 if ((code == EQ || code == NE)
5767 && op1 == const0_rtx
5768 && op0code == XOR)
5769 return simplify_gen_relational (code, mode, cmp_mode,
5770 XEXP (op0, 0), XEXP (op0, 1));
5772 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
5773 if ((code == EQ || code == NE)
5774 && op0code == XOR
5775 && rtx_equal_p (XEXP (op0, 0), op1)
5776 && !side_effects_p (XEXP (op0, 0)))
5777 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
5778 CONST0_RTX (mode));
5780 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
5781 if ((code == EQ || code == NE)
5782 && op0code == XOR
5783 && rtx_equal_p (XEXP (op0, 1), op1)
5784 && !side_effects_p (XEXP (op0, 1)))
5785 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5786 CONST0_RTX (mode));
5788 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
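/* For example, (eq (xor x 5) 3) becomes (eq x 6), since x ^ 5 == 3
   exactly when x == 5 ^ 3 == 6. */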
5789 if ((code == EQ || code == NE)
5790 && op0code == XOR
5791 && CONST_SCALAR_INT_P (op1)
5792 && CONST_SCALAR_INT_P (XEXP (op0, 1)))
5793 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5794 simplify_gen_binary (XOR, cmp_mode,
5795 XEXP (op0, 1), op1));
5797 /* Simplify (eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
5798 constant folding if x/y is a constant. */
5799 if ((code == EQ || code == NE)
5800 && (op0code == AND || op0code == IOR)
5801 && !side_effects_p (op1)
5802 && op1 != CONST0_RTX (cmp_mode))
5804 /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
5805 (eq/ne (and (not y) x) 0). */
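/* For example, (eq (and x y) x) asks whether every bit set in X is also
   set in Y, which holds exactly when (and (not y) x) is zero. */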
5806 if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
5807 || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
5809 rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
5810 cmp_mode);
5811 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
5813 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5814 CONST0_RTX (cmp_mode));
5817 /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
5818 (eq/ne (and (not x) y) 0). */
5819 if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
5820 || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
5822 rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
5823 cmp_mode);
5824 rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
5826 return simplify_gen_relational (code, mode, cmp_mode, lhs,
5827 CONST0_RTX (cmp_mode));
5831 /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2), where C2 is C1 byte-swapped. */
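/* For example, (eq (bswap:SI x) (const_int 0x12345678)) becomes
   (eq x (const_int 0x78563412)). */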
5832 if ((code == EQ || code == NE)
5833 && GET_CODE (op0) == BSWAP
5834 && CONST_SCALAR_INT_P (op1))
5835 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
5836 simplify_gen_unary (BSWAP, cmp_mode,
5837 op1, cmp_mode));
5839 /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
5840 if ((code == EQ || code == NE)
5841 && GET_CODE (op0) == BSWAP
5842 && GET_CODE (op1) == BSWAP)
5843 return simplify_gen_relational (code, mode, cmp_mode,
5844 XEXP (op0, 0), XEXP (op1, 0));
5846 if (op0code == POPCOUNT && op1 == const0_rtx)
5847 switch (code)
5849 case EQ:
5850 case LE:
5851 case LEU:
5852 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
5853 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
5854 XEXP (op0, 0), const0_rtx);
5856 case NE:
5857 case GT:
5858 case GTU:
5859 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
5860 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
5861 XEXP (op0, 0), const0_rtx);
5863 default:
5864 break;
5867 return NULL_RTX;
5870 enum
5872 CMP_EQ = 1,
5873 CMP_LT = 2,
5874 CMP_GT = 4,
5875 CMP_LTU = 8,
5876 CMP_GTU = 16
5880 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
5881 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
5882 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
5883 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
5884 For floating-point comparisons, assume that the operands were ordered. */
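/* For example, when comparing the SImode constants 3 and -1, 3 is greater
   when viewed as signed but smaller when viewed as unsigned, so the known
   results are CMP_GT | CMP_LTU: (lt 3 -1) folds to const0_rtx while
   (ltu 3 -1) folds to const_true_rtx. */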
5886 static rtx
5887 comparison_result (enum rtx_code code, int known_results)
5889 switch (code)
5891 case EQ:
5892 case UNEQ:
5893 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
5894 case NE:
5895 case LTGT:
5896 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
5898 case LT:
5899 case UNLT:
5900 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
5901 case GE:
5902 case UNGE:
5903 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
5905 case GT:
5906 case UNGT:
5907 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
5908 case LE:
5909 case UNLE:
5910 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
5912 case LTU:
5913 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
5914 case GEU:
5915 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
5917 case GTU:
5918 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
5919 case LEU:
5920 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
5922 case ORDERED:
5923 return const_true_rtx;
5924 case UNORDERED:
5925 return const0_rtx;
5926 default:
5927 gcc_unreachable ();
5931 /* Check if the given comparison (done in the given MODE) is actually
5932 a tautology or a contradiction. If the mode is VOIDmode, the
5933 comparison is done in "infinite precision". If no simplification
5934 is possible, this function returns zero. Otherwise, it returns
5935 either const_true_rtx or const0_rtx. */
5937 rtx
5938 simplify_const_relational_operation (enum rtx_code code,
5939 machine_mode mode,
5940 rtx op0, rtx op1)
5942 rtx tem;
5943 rtx trueop0;
5944 rtx trueop1;
5946 gcc_assert (mode != VOIDmode
5947 || (GET_MODE (op0) == VOIDmode
5948 && GET_MODE (op1) == VOIDmode));
5950 /* If op0 is a compare, extract the comparison arguments from it. */
5951 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
5953 op1 = XEXP (op0, 1);
5954 op0 = XEXP (op0, 0);
5956 if (GET_MODE (op0) != VOIDmode)
5957 mode = GET_MODE (op0);
5958 else if (GET_MODE (op1) != VOIDmode)
5959 mode = GET_MODE (op1);
5960 else
5961 return 0;
5964 /* We can't simplify MODE_CC values since we don't know what the
5965 actual comparison is. */
5966 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5967 return 0;
5969 /* Make sure the constant is second. */
5970 if (swap_commutative_operands_p (op0, op1))
5972 std::swap (op0, op1);
5973 code = swap_condition (code);
5976 trueop0 = avoid_constant_pool_reference (op0);
5977 trueop1 = avoid_constant_pool_reference (op1);
5979 /* For integer comparisons of A and B maybe we can simplify A - B and can
5980 then simplify a comparison of that with zero. If A and B are both either
5981 a register or a CONST_INT, this can't help; testing for these cases will
5982 prevent infinite recursion here and speed things up.
5984 We can only do this for EQ and NE comparisons; otherwise we may
5985 lose or introduce overflow, which we cannot disregard as undefined
5986 because we do not know the signedness of the operation on either the
5987 left or the right-hand side of the comparison. */
5989 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
5990 && (code == EQ || code == NE)
5991 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
5992 && (REG_P (op1) || CONST_INT_P (trueop1)))
5993 && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
5994 /* We cannot do this if tem is a nonzero address. */
5995 && ! nonzero_address_p (tem))
5996 return simplify_const_relational_operation (signed_condition (code),
5997 mode, tem, const0_rtx);
5999 if (! HONOR_NANS (mode) && code == ORDERED)
6000 return const_true_rtx;
6002 if (! HONOR_NANS (mode) && code == UNORDERED)
6003 return const0_rtx;
6005 /* For modes without NaNs, if the two operands are equal, we know the
6006 result except if they have side-effects. Even with NaNs we know
6007 the result of unordered comparisons and, if signaling NaNs are
6008 irrelevant, also the result of LT/GT/LTGT. */
6009 if ((! HONOR_NANS (trueop0)
6010 || code == UNEQ || code == UNLE || code == UNGE
6011 || ((code == LT || code == GT || code == LTGT)
6012 && ! HONOR_SNANS (trueop0)))
6013 && rtx_equal_p (trueop0, trueop1)
6014 && ! side_effects_p (trueop0))
6015 return comparison_result (code, CMP_EQ);
6017 /* If the operands are floating-point constants, see if we can fold
6018 the result. */
6019 if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
6020 && CONST_DOUBLE_AS_FLOAT_P (trueop1)
6021 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
6023 const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
6024 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
6026 /* Comparisons are unordered iff at least one of the values is NaN. */
6027 if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
6028 switch (code)
6030 case UNEQ:
6031 case UNLT:
6032 case UNGT:
6033 case UNLE:
6034 case UNGE:
6035 case NE:
6036 case UNORDERED:
6037 return const_true_rtx;
6038 case EQ:
6039 case LT:
6040 case GT:
6041 case LE:
6042 case GE:
6043 case LTGT:
6044 case ORDERED:
6045 return const0_rtx;
6046 default:
6047 return 0;
6050 return comparison_result (code,
6051 (real_equal (d0, d1) ? CMP_EQ :
6052 real_less (d0, d1) ? CMP_LT : CMP_GT));
6055 /* Otherwise, see if the operands are both integers. */
6056 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
6057 && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
6059 /* It would be nice if we really had a mode here. However, the
6060 largest int representable on the target is as good as
6061 infinite. */
6062 machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
6063 rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
6064 rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
6066 if (wi::eq_p (ptrueop0, ptrueop1))
6067 return comparison_result (code, CMP_EQ);
6068 else
6070 int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
6071 cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
6072 return comparison_result (code, cr);
6076 /* Optimize comparisons with upper and lower bounds. */
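/* For example, if nonzero_bits shows that only the low 8 bits of the first
   operand can be set (so it lies in [0, 255]), then (gtu x 255) folds to
   const0_rtx and (leu x 255) folds to const_true_rtx. */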
6077 scalar_int_mode int_mode;
6078 if (CONST_INT_P (trueop1)
6079 && is_a <scalar_int_mode> (mode, &int_mode)
6080 && HWI_COMPUTABLE_MODE_P (int_mode)
6081 && !side_effects_p (trueop0))
6083 int sign;
6084 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
6085 HOST_WIDE_INT val = INTVAL (trueop1);
6086 HOST_WIDE_INT mmin, mmax;
6088 if (code == GEU
6089 || code == LEU
6090 || code == GTU
6091 || code == LTU)
6092 sign = 0;
6093 else
6094 sign = 1;
6096 /* Get a reduced range if the sign bit is zero. */
6097 if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
6099 mmin = 0;
6100 mmax = nonzero;
6102 else
6104 rtx mmin_rtx, mmax_rtx;
6105 get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
6107 mmin = INTVAL (mmin_rtx);
6108 mmax = INTVAL (mmax_rtx);
6109 if (sign)
6111 unsigned int sign_copies
6112 = num_sign_bit_copies (trueop0, int_mode);
6114 mmin >>= (sign_copies - 1);
6115 mmax >>= (sign_copies - 1);
6119 switch (code)
6121 /* x >= y is always true for y <= mmin, always false for y > mmax. */
6122 case GEU:
6123 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6124 return const_true_rtx;
6125 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6126 return const0_rtx;
6127 break;
6128 case GE:
6129 if (val <= mmin)
6130 return const_true_rtx;
6131 if (val > mmax)
6132 return const0_rtx;
6133 break;
6135 /* x <= y is always true for y >= mmax, always false for y < mmin. */
6136 case LEU:
6137 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6138 return const_true_rtx;
6139 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6140 return const0_rtx;
6141 break;
6142 case LE:
6143 if (val >= mmax)
6144 return const_true_rtx;
6145 if (val < mmin)
6146 return const0_rtx;
6147 break;
6149 case EQ:
6150 /* x == y is always false for y out of range. */
6151 if (val < mmin || val > mmax)
6152 return const0_rtx;
6153 break;
6155 /* x > y is always false for y >= mmax, always true for y < mmin. */
6156 case GTU:
6157 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
6158 return const0_rtx;
6159 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
6160 return const_true_rtx;
6161 break;
6162 case GT:
6163 if (val >= mmax)
6164 return const0_rtx;
6165 if (val < mmin)
6166 return const_true_rtx;
6167 break;
6169 /* x < y is always false for y <= mmin, always true for y > mmax. */
6170 case LTU:
6171 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
6172 return const0_rtx;
6173 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
6174 return const_true_rtx;
6175 break;
6176 case LT:
6177 if (val <= mmin)
6178 return const0_rtx;
6179 if (val > mmax)
6180 return const_true_rtx;
6181 break;
6183 case NE:
6184 /* x != y is always true for y out of range. */
6185 if (val < mmin || val > mmax)
6186 return const_true_rtx;
6187 break;
6189 default:
6190 break;
6194 /* Optimize integer comparisons with zero. */
6195 if (is_a <scalar_int_mode> (mode, &int_mode)
6196 && trueop1 == const0_rtx
6197 && !side_effects_p (trueop0))
6199 /* Some addresses are known to be nonzero. We don't know
6200 their sign, but equality comparisons are known. */
6201 if (nonzero_address_p (trueop0))
6203 if (code == EQ || code == LEU)
6204 return const0_rtx;
6205 if (code == NE || code == GTU)
6206 return const_true_rtx;
6209 /* See if the first operand is an IOR with a constant. If so, we
6210 may be able to determine the result of this comparison. */
6211 if (GET_CODE (op0) == IOR)
6213 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
6214 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
6216 int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
6217 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
6218 && (UINTVAL (inner_const)
6219 & (HOST_WIDE_INT_1U
6220 << sign_bitnum)));
6222 switch (code)
6224 case EQ:
6225 case LEU:
6226 return const0_rtx;
6227 case NE:
6228 case GTU:
6229 return const_true_rtx;
6230 case LT:
6231 case LE:
6232 if (has_sign)
6233 return const_true_rtx;
6234 break;
6235 case GT:
6236 case GE:
6237 if (has_sign)
6238 return const0_rtx;
6239 break;
6240 default:
6241 break;
6247 /* Optimize comparison of ABS with zero. */
6248 if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
6249 && (GET_CODE (trueop0) == ABS
6250 || (GET_CODE (trueop0) == FLOAT_EXTEND
6251 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
6253 switch (code)
6255 case LT:
6256 /* Optimize abs(x) < 0.0. */
6257 if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
6258 return const0_rtx;
6259 break;
6261 case GE:
6262 /* Optimize abs(x) >= 0.0. */
6263 if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
6264 return const_true_rtx;
6265 break;
6267 case UNGE:
6268 /* Optimize ! (abs(x) < 0.0). */
6269 return const_true_rtx;
6271 default:
6272 break;
6276 return 0;
6279 /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
6280 where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
6281 or CTZ_DEFINED_VALUE_AT_ZERO respectively.  Return OP (X) if the expression
6282 can be simplified to that, or NULL_RTX if not.
6283 Assume X is compared against zero with CMP_CODE and the true
6284 arm is TRUE_VAL and the false arm is FALSE_VAL. */
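/* For example, on a target where CLZ_DEFINED_VALUE_AT_ZERO yields the mode
   precision (32 for SImode), both (x == 0 ? 32 : (clz:SI x)) and
   (x != 0 ? (clz:SI x) : 32) simplify to (clz:SI x). */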
6286 rtx
6287 simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
6288 rtx true_val, rtx false_val)
6290 if (cmp_code != EQ && cmp_code != NE)
6291 return NULL_RTX;
6293 /* Result on X == 0 and X != 0 respectively. */
6294 rtx on_zero, on_nonzero;
6295 if (cmp_code == EQ)
6297 on_zero = true_val;
6298 on_nonzero = false_val;
6300 else
6302 on_zero = false_val;
6303 on_nonzero = true_val;
6306 rtx_code op_code = GET_CODE (on_nonzero);
6307 if ((op_code != CLZ && op_code != CTZ)
6308 || !rtx_equal_p (XEXP (on_nonzero, 0), x)
6309 || !CONST_INT_P (on_zero))
6310 return NULL_RTX;
6312 HOST_WIDE_INT op_val;
6313 scalar_int_mode mode ATTRIBUTE_UNUSED
6314 = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
6315 if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
6316 || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
6317 && op_val == INTVAL (on_zero))
6318 return on_nonzero;
6320 return NULL_RTX;
6323 /* Try to simplify X given that it appears within operand OP of a
6324 VEC_MERGE operation whose mask is MASK. X need not use the same
6325 vector mode as the VEC_MERGE, but it must have the same number of
6326 elements.
6328 Return the simplified X on success, otherwise return NULL_RTX. */
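/* For example, when simplifying operand 0 of
   (vec_merge (plus (vec_merge a b M) c) d M), the inner VEC_MERGE can be
   replaced by A: only the lanes selected by M survive into the result, and
   for those lanes the inner VEC_MERGE also picks A. */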
6330 rtx
6331 simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
6333 gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
6334 poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
6335 if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
6337 if (side_effects_p (XEXP (x, 1 - op)))
6338 return NULL_RTX;
6340 return XEXP (x, op);
6342 if (UNARY_P (x)
6343 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6344 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
6346 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6347 if (top0)
6348 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
6349 GET_MODE (XEXP (x, 0)));
6351 if (BINARY_P (x)
6352 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6353 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6354 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6355 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
6357 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6358 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6359 if (top0 || top1)
6361 if (COMPARISON_P (x))
6362 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
6363 GET_MODE (XEXP (x, 0)) != VOIDmode
6364 ? GET_MODE (XEXP (x, 0))
6365 : GET_MODE (XEXP (x, 1)),
6366 top0 ? top0 : XEXP (x, 0),
6367 top1 ? top1 : XEXP (x, 1));
6368 else
6369 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
6370 top0 ? top0 : XEXP (x, 0),
6371 top1 ? top1 : XEXP (x, 1));
6374 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
6375 && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
6376 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
6377 && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
6378 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
6379 && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
6380 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
6382 rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
6383 rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
6384 rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
6385 if (top0 || top1 || top2)
6386 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
6387 GET_MODE (XEXP (x, 0)),
6388 top0 ? top0 : XEXP (x, 0),
6389 top1 ? top1 : XEXP (x, 1),
6390 top2 ? top2 : XEXP (x, 2));
6392 return NULL_RTX;
6396 /* Simplify CODE, an operation with result mode MODE and three operands,
6397 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
6398 a constant.  Return 0 if no simplification is possible. */
6400 rtx
6401 simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
6402 machine_mode op0_mode,
6403 rtx op0, rtx op1, rtx op2)
6405 bool any_change = false;
6406 rtx tem, trueop2;
6407 scalar_int_mode int_mode, int_op0_mode;
6408 unsigned int n_elts;
6410 switch (code)
6412 case FMA:
6413 /* Simplify negations around the multiplication. */
6414 /* -a * -b + c => a * b + c. */
6415 if (GET_CODE (op0) == NEG)
6417 tem = simplify_unary_operation (NEG, mode, op1, mode);
6418 if (tem)
6419 op1 = tem, op0 = XEXP (op0, 0), any_change = true;
6421 else if (GET_CODE (op1) == NEG)
6423 tem = simplify_unary_operation (NEG, mode, op0, mode);
6424 if (tem)
6425 op0 = tem, op1 = XEXP (op1, 0), any_change = true;
6428 /* Canonicalize the two multiplication operands. */
6429 /* a * -b + c => -b * a + c. */
6430 if (swap_commutative_operands_p (op0, op1))
6431 std::swap (op0, op1), any_change = true;
6433 if (any_change)
6434 return gen_rtx_FMA (mode, op0, op1, op2);
6435 return NULL_RTX;
6437 case SIGN_EXTRACT:
6438 case ZERO_EXTRACT:
6439 if (CONST_INT_P (op0)
6440 && CONST_INT_P (op1)
6441 && CONST_INT_P (op2)
6442 && is_a <scalar_int_mode> (mode, &int_mode)
6443 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
6444 && HWI_COMPUTABLE_MODE_P (int_mode))
6446 /* Extracting a bit-field from a constant */
6447 unsigned HOST_WIDE_INT val = UINTVAL (op0);
6448 HOST_WIDE_INT op1val = INTVAL (op1);
6449 HOST_WIDE_INT op2val = INTVAL (op2);
6450 if (!BITS_BIG_ENDIAN)
6451 val >>= op2val;
6452 else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
6453 val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
6454 else
6455 /* Not enough information to calculate the bit position. */
6456 break;
6458 if (HOST_BITS_PER_WIDE_INT != op1val)
6460 /* First zero-extend. */
6461 val &= (HOST_WIDE_INT_1U << op1val) - 1;
6462 /* If desired, propagate sign bit. */
6463 if (code == SIGN_EXTRACT
6464 && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
6465 != 0)
6466 val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
6469 return gen_int_mode (val, int_mode);
6471 break;
6473 case IF_THEN_ELSE:
6474 if (CONST_INT_P (op0))
6475 return op0 != const0_rtx ? op1 : op2;
6477 /* Convert c ? a : a into "a". */
6478 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
6479 return op1;
6481 /* Convert a != b ? a : b into "a". */
6482 if (GET_CODE (op0) == NE
6483 && ! side_effects_p (op0)
6484 && ! HONOR_NANS (mode)
6485 && ! HONOR_SIGNED_ZEROS (mode)
6486 && ((rtx_equal_p (XEXP (op0, 0), op1)
6487 && rtx_equal_p (XEXP (op0, 1), op2))
6488 || (rtx_equal_p (XEXP (op0, 0), op2)
6489 && rtx_equal_p (XEXP (op0, 1), op1))))
6490 return op1;
6492 /* Convert a == b ? a : b into "b". */
6493 if (GET_CODE (op0) == EQ
6494 && ! side_effects_p (op0)
6495 && ! HONOR_NANS (mode)
6496 && ! HONOR_SIGNED_ZEROS (mode)
6497 && ((rtx_equal_p (XEXP (op0, 0), op1)
6498 && rtx_equal_p (XEXP (op0, 1), op2))
6499 || (rtx_equal_p (XEXP (op0, 0), op2)
6500 && rtx_equal_p (XEXP (op0, 1), op1))))
6501 return op2;
6503 /* Convert (!c) != {0,...,0} ? a : b into
6504 c != {0,...,0} ? b : a for vector modes. */
6505 if (VECTOR_MODE_P (GET_MODE (op1))
6506 && GET_CODE (op0) == NE
6507 && GET_CODE (XEXP (op0, 0)) == NOT
6508 && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
6510 rtx cv = XEXP (op0, 1);
6511 int nunits;
6512 bool ok = true;
6513 if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
6514 ok = false;
6515 else
6516 for (int i = 0; i < nunits; ++i)
6517 if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
6519 ok = false;
6520 break;
6522 if (ok)
6524 rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
6525 XEXP (XEXP (op0, 0), 0),
6526 XEXP (op0, 1));
6527 rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
6528 return retval;
6532 /* Convert x == 0 ? N : clz (x) into clz (x) when
6533 CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
6534 Similarly for ctz (x). */
6535 if (COMPARISON_P (op0) && !side_effects_p (op0)
6536 && XEXP (op0, 1) == const0_rtx)
6538 rtx simplified
6539 = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
6540 op1, op2);
6541 if (simplified)
6542 return simplified;
6545 if (COMPARISON_P (op0) && ! side_effects_p (op0))
6547 machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
6548 ? GET_MODE (XEXP (op0, 1))
6549 : GET_MODE (XEXP (op0, 0)));
6550 rtx temp;
6552 /* Look for happy constants in op1 and op2. */
6553 if (CONST_INT_P (op1) && CONST_INT_P (op2))
6555 HOST_WIDE_INT t = INTVAL (op1);
6556 HOST_WIDE_INT f = INTVAL (op2);
6558 if (t == STORE_FLAG_VALUE && f == 0)
6559 code = GET_CODE (op0);
6560 else if (t == 0 && f == STORE_FLAG_VALUE)
6562 enum rtx_code tmp;
6563 tmp = reversed_comparison_code (op0, NULL);
6564 if (tmp == UNKNOWN)
6565 break;
6566 code = tmp;
6568 else
6569 break;
6571 return simplify_gen_relational (code, mode, cmp_mode,
6572 XEXP (op0, 0), XEXP (op0, 1));
6575 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
6576 cmp_mode, XEXP (op0, 0),
6577 XEXP (op0, 1));
6579 /* See if any simplifications were possible. */
6580 if (temp)
6582 if (CONST_INT_P (temp))
6583 return temp == const0_rtx ? op2 : op1;
6584 else if (temp)
6585 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
6588 break;
6590 case VEC_MERGE:
6591 gcc_assert (GET_MODE (op0) == mode);
6592 gcc_assert (GET_MODE (op1) == mode);
6593 gcc_assert (VECTOR_MODE_P (mode));
6594 trueop2 = avoid_constant_pool_reference (op2);
6595 if (CONST_INT_P (trueop2)
6596 && GET_MODE_NUNITS (mode).is_constant (&n_elts))
6598 unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
6599 unsigned HOST_WIDE_INT mask;
6600 if (n_elts == HOST_BITS_PER_WIDE_INT)
6601 mask = -1;
6602 else
6603 mask = (HOST_WIDE_INT_1U << n_elts) - 1;
6605 if (!(sel & mask) && !side_effects_p (op0))
6606 return op1;
6607 if ((sel & mask) == mask && !side_effects_p (op1))
6608 return op0;
6610 rtx trueop0 = avoid_constant_pool_reference (op0);
6611 rtx trueop1 = avoid_constant_pool_reference (op1);
6612 if (GET_CODE (trueop0) == CONST_VECTOR
6613 && GET_CODE (trueop1) == CONST_VECTOR)
6615 rtvec v = rtvec_alloc (n_elts);
6616 unsigned int i;
6618 for (i = 0; i < n_elts; i++)
6619 RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
6620 ? CONST_VECTOR_ELT (trueop0, i)
6621 : CONST_VECTOR_ELT (trueop1, i));
6622 return gen_rtx_CONST_VECTOR (mode, v);
6625 /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
6626 if no element from a appears in the result. */
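/* For example, in V4SI with n == 3 (lanes 0 and 1 from operand 0) and
   m == 12 (lanes 2 and 3 from A), no lane of A reaches the result, so
   (vec_merge (vec_merge a b 12) c 3) becomes (vec_merge b c 3). */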
6627 if (GET_CODE (op0) == VEC_MERGE)
6629 tem = avoid_constant_pool_reference (XEXP (op0, 2));
6630 if (CONST_INT_P (tem))
6632 unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
6633 if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
6634 return simplify_gen_ternary (code, mode, mode,
6635 XEXP (op0, 1), op1, op2);
6636 if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
6637 return simplify_gen_ternary (code, mode, mode,
6638 XEXP (op0, 0), op1, op2);
6641 if (GET_CODE (op1) == VEC_MERGE)
6643 tem = avoid_constant_pool_reference (XEXP (op1, 2));
6644 if (CONST_INT_P (tem))
6646 unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
6647 if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
6648 return simplify_gen_ternary (code, mode, mode,
6649 op0, XEXP (op1, 1), op2);
6650 if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
6651 return simplify_gen_ternary (code, mode, mode,
6652 op0, XEXP (op1, 0), op2);
6656 /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
6657 with a. */
6658 if (GET_CODE (op0) == VEC_DUPLICATE
6659 && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
6660 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
6661 && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
6663 tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
6664 if (CONST_INT_P (tem) && CONST_INT_P (op2))
6666 if (XEXP (XEXP (op0, 0), 0) == op1
6667 && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
6668 return op1;
6671 /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
6672 (const_int N))
6673 with (vec_concat (X) (B)) if N == 1 or
6674 (vec_concat (A) (X)) if N == 2. */
6675 if (GET_CODE (op0) == VEC_DUPLICATE
6676 && GET_CODE (op1) == CONST_VECTOR
6677 && known_eq (CONST_VECTOR_NUNITS (op1), 2)
6678 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6679 && IN_RANGE (sel, 1, 2))
6681 rtx newop0 = XEXP (op0, 0);
6682 rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
6683 if (sel == 2)
6684 std::swap (newop0, newop1);
6685 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6687 /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
6688 with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
6689 Only applies for vectors of two elements. */
6690 if (GET_CODE (op0) == VEC_DUPLICATE
6691 && GET_CODE (op1) == VEC_CONCAT
6692 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6693 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6694 && IN_RANGE (sel, 1, 2))
6696 rtx newop0 = XEXP (op0, 0);
6697 rtx newop1 = XEXP (op1, 2 - sel);
6698 rtx otherop = XEXP (op1, sel - 1);
6699 if (sel == 2)
6700 std::swap (newop0, newop1);
6701 /* Don't want to throw away the other part of the vec_concat if
6702 it has side-effects. */
6703 if (!side_effects_p (otherop))
6704 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6707 /* Replace:
6709 (vec_merge:outer (vec_duplicate:outer x:inner)
6710 (subreg:outer y:inner 0)
6711 (const_int N))
6713 with (vec_concat:outer x:inner y:inner) if N == 1,
6714 or (vec_concat:outer y:inner x:inner) if N == 2.
6716 Implicitly, this means we have a paradoxical subreg, but such
6717 a check is cheap, so make it anyway.
6719 Only applies for vectors of two elements. */
6720 if (GET_CODE (op0) == VEC_DUPLICATE
6721 && GET_CODE (op1) == SUBREG
6722 && GET_MODE (op1) == GET_MODE (op0)
6723 && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
6724 && paradoxical_subreg_p (op1)
6725 && subreg_lowpart_p (op1)
6726 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6727 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6728 && IN_RANGE (sel, 1, 2))
6730 rtx newop0 = XEXP (op0, 0);
6731 rtx newop1 = SUBREG_REG (op1);
6732 if (sel == 2)
6733 std::swap (newop0, newop1);
6734 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6737 /* Same as above but with switched operands:
6738 Replace (vec_merge:outer (subreg:outer x:inner 0)
6739 (vec_duplicate:outer y:inner)
6740 (const_int N))
6742 with (vec_concat:outer x:inner y:inner) if N == 1,
6743 or (vec_concat:outer y:inner x:inner) if N == 2. */
6744 if (GET_CODE (op1) == VEC_DUPLICATE
6745 && GET_CODE (op0) == SUBREG
6746 && GET_MODE (op0) == GET_MODE (op1)
6747 && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
6748 && paradoxical_subreg_p (op0)
6749 && subreg_lowpart_p (op0)
6750 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6751 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6752 && IN_RANGE (sel, 1, 2))
6754 rtx newop0 = SUBREG_REG (op0);
6755 rtx newop1 = XEXP (op1, 0);
6756 if (sel == 2)
6757 std::swap (newop0, newop1);
6758 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6761 /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
6762 (const_int n))
6763 with (vec_concat x y) or (vec_concat y x) depending on value
6764 of N. */
6765 if (GET_CODE (op0) == VEC_DUPLICATE
6766 && GET_CODE (op1) == VEC_DUPLICATE
6767 && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
6768 && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
6769 && IN_RANGE (sel, 1, 2))
6771 rtx newop0 = XEXP (op0, 0);
6772 rtx newop1 = XEXP (op1, 0);
6773 if (sel == 2)
6774 std::swap (newop0, newop1);
6776 return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
6780 if (rtx_equal_p (op0, op1)
6781 && !side_effects_p (op2) && !side_effects_p (op1))
6782 return op0;
6784 if (!side_effects_p (op2))
6786 rtx top0
6787 = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
6788 rtx top1
6789 = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
6790 if (top0 || top1)
6791 return simplify_gen_ternary (code, mode, mode,
6792 top0 ? top0 : op0,
6793 top1 ? top1 : op1, op2);
6796 break;
6798 default:
6799 gcc_unreachable ();
6802 return 0;
6805 /* Try to calculate NUM_BYTES bytes of the target memory image of X,
6806 starting at byte FIRST_BYTE. Return true on success and add the
6807 bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
6808 that the bytes follow target memory order. Leave BYTES unmodified
6809 on failure.
6811 MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
6812 BYTES before calling this function. */
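/* For example, encoding (const_int 0x0102) in HImode gives the bytes
   { 0x02, 0x01 } on a little-endian target and { 0x01, 0x02 } on a
   big-endian one. */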
6814 bool
6815 native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
6816 unsigned int first_byte, unsigned int num_bytes)
6818 /* Check the mode is sensible. */
6819 gcc_assert (GET_MODE (x) == VOIDmode
6820 ? is_a <scalar_int_mode> (mode)
6821 : mode == GET_MODE (x));
6823 if (GET_CODE (x) == CONST_VECTOR)
6825 /* CONST_VECTOR_ELT follows target memory order, so no shuffling
6826 is necessary. The only complication is that MODE_VECTOR_BOOL
6827 vectors can have several elements per byte. */
6828 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6829 GET_MODE_NUNITS (mode));
6830 unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
6831 if (elt_bits < BITS_PER_UNIT)
6833 /* This is the only case in which elements can be smaller than
6834 a byte. */
6835 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6836 for (unsigned int i = 0; i < num_bytes; ++i)
6838 target_unit value = 0;
6839 for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
6841 value |= (INTVAL (CONST_VECTOR_ELT (x, elt)) & 1) << j;
6842 elt += 1;
6844 bytes.quick_push (value);
6846 return true;
6849 unsigned int start = bytes.length ();
6850 unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
6851 /* Make FIRST_BYTE relative to ELT. */
6852 first_byte %= elt_bytes;
6853 while (num_bytes > 0)
6855 /* Work out how many bytes we want from element ELT. */
6856 unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
6857 if (!native_encode_rtx (GET_MODE_INNER (mode),
6858 CONST_VECTOR_ELT (x, elt), bytes,
6859 first_byte, chunk_bytes))
6861 bytes.truncate (start);
6862 return false;
6864 elt += 1;
6865 first_byte = 0;
6866 num_bytes -= chunk_bytes;
6868 return true;
6871 /* All subsequent cases are limited to scalars. */
6872 scalar_mode smode;
6873 if (!is_a <scalar_mode> (mode, &smode))
6874 return false;
6876 /* Make sure that the region is in range. */
6877 unsigned int end_byte = first_byte + num_bytes;
6878 unsigned int mode_bytes = GET_MODE_SIZE (smode);
6879 gcc_assert (end_byte <= mode_bytes);
6881 if (CONST_SCALAR_INT_P (x))
6883 /* The target memory layout is affected by both BYTES_BIG_ENDIAN
6884 and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
6885 position of each byte. */
6886 rtx_mode_t value (x, smode);
6887 wide_int_ref value_wi (value);
6888 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6890 /* Always constant because the inputs are. */
6891 unsigned int lsb
6892 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6893 /* Operate directly on the encoding rather than using
6894 wi::extract_uhwi, so that we preserve the sign or zero
6895 extension for modes that are not a whole number of bits in
6896 size. (Zero extension is only used for the combination of
6897 innermode == BImode && STORE_FLAG_VALUE == 1). */
6898 unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
6899 unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
6900 unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
6901 bytes.quick_push (uhwi >> shift);
6903 return true;
6906 if (CONST_DOUBLE_P (x))
6908 /* real_to_target produces an array of integers in target memory order.
6909 All integers before the last one have 32 bits; the last one may
6910 have 32 bits or fewer, depending on whether the mode bitsize
6911 is divisible by 32. Each of these integers is then laid out
6912 in target memory as any other integer would be. */
6913 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
6914 real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
6916 /* The (maximum) number of target bytes per element of el32. */
6917 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
6918 gcc_assert (bytes_per_el32 != 0);
6920 /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
6921 handling above. */
6922 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6924 unsigned int index = byte / bytes_per_el32;
6925 unsigned int subbyte = byte % bytes_per_el32;
6926 unsigned int int_bytes = MIN (bytes_per_el32,
6927 mode_bytes - index * bytes_per_el32);
6928 /* Always constant because the inputs are. */
6929 unsigned int lsb
6930 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
6931 bytes.quick_push ((unsigned long) el32[index] >> lsb);
6933 return true;
6936 if (GET_CODE (x) == CONST_FIXED)
6938 for (unsigned int byte = first_byte; byte < end_byte; ++byte)
6940 /* Always constant because the inputs are. */
6941 unsigned int lsb
6942 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
6943 unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
6944 if (lsb >= HOST_BITS_PER_WIDE_INT)
6946 lsb -= HOST_BITS_PER_WIDE_INT;
6947 piece = CONST_FIXED_VALUE_HIGH (x);
6949 bytes.quick_push (piece >> lsb);
6951 return true;
6954 return false;
6957 /* Read a vector of mode MODE from the target memory image given by BYTES,
6958 starting at byte FIRST_BYTE. The vector is known to be encodable using
6959 NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
6960 and BYTES is known to have enough bytes to supply NPATTERNS *
6961 NELTS_PER_PATTERN vector elements. Each element of BYTES contains
6962 BITS_PER_UNIT bits and the bytes are in target memory order.
6964 Return the vector on success, otherwise return NULL_RTX. */
6966 rtx
6967 native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
6968 unsigned int first_byte, unsigned int npatterns,
6969 unsigned int nelts_per_pattern)
6971 rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
6973 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
6974 GET_MODE_NUNITS (mode));
6975 if (elt_bits < BITS_PER_UNIT)
6977 /* This is the only case in which elements can be smaller than a byte.
6978 Element 0 is always in the lsb of the containing byte. */
6979 gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
6980 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6982 unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
6983 unsigned int byte_index = bit_index / BITS_PER_UNIT;
6984 unsigned int lsb = bit_index % BITS_PER_UNIT;
6985 builder.quick_push (bytes[byte_index] & (1 << lsb)
6986 ? CONST1_RTX (BImode)
6987 : CONST0_RTX (BImode));
6990 else
6992 for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
6994 rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
6995 if (!x)
6996 return NULL_RTX;
6997 builder.quick_push (x);
6998 first_byte += elt_bits / BITS_PER_UNIT;
7001 return builder.build ();
7004 /* Read an rtx of mode MODE from the target memory image given by BYTES,
7005 starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
7006 bits and the bytes are in target memory order. The image has enough
7007 values to specify all bytes of MODE.
7009 Return the rtx on success, otherwise return NULL_RTX. */
7011 rtx
7012 native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
7013 unsigned int first_byte)
7015 if (VECTOR_MODE_P (mode))
7017 /* If we know at compile time how many elements there are,
7018 pull each element directly from BYTES. */
7019 unsigned int nelts;
7020 if (GET_MODE_NUNITS (mode).is_constant (&nelts))
7021 return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
7022 return NULL_RTX;
7025 scalar_int_mode imode;
7026 if (is_a <scalar_int_mode> (mode, &imode)
7027 && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
7029 /* Pull the bytes msb first, so that we can use simple
7030 shift-and-insert wide_int operations. */
7031 unsigned int size = GET_MODE_SIZE (imode);
7032 wide_int result (wi::zero (GET_MODE_PRECISION (imode)));
7033 for (unsigned int i = 0; i < size; ++i)
7035 unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
7036 /* Always constant because the inputs are. */
7037 unsigned int subbyte
7038 = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
7039 result <<= BITS_PER_UNIT;
7040 result |= bytes[first_byte + subbyte];
7042 return immed_wide_int_const (result, imode);
7045 scalar_float_mode fmode;
7046 if (is_a <scalar_float_mode> (mode, &fmode))
7048 /* We need to build an array of integers in target memory order.
7049 All integers before the last one have 32 bits; the last one may
7050 have 32 bits or fewer, depending on whether the mode bitsize
7051 is divisible by 32. */
7052 long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
7053 unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
7054 memset (el32, 0, num_el32 * sizeof (long));
7056 /* The (maximum) number of target bytes per element of el32. */
7057 unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
7058 gcc_assert (bytes_per_el32 != 0);
7060 unsigned int mode_bytes = GET_MODE_SIZE (fmode);
7061 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7063 unsigned int index = byte / bytes_per_el32;
7064 unsigned int subbyte = byte % bytes_per_el32;
7065 unsigned int int_bytes = MIN (bytes_per_el32,
7066 mode_bytes - index * bytes_per_el32);
7067 /* Always constant because the inputs are. */
7068 unsigned int lsb
7069 = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
7070 el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
7072 REAL_VALUE_TYPE r;
7073 real_from_target (&r, el32, fmode);
7074 return const_double_from_real_value (r, fmode);
7077 if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
7079 scalar_mode smode = as_a <scalar_mode> (mode);
7080 FIXED_VALUE_TYPE f;
7081 f.data.low = 0;
7082 f.data.high = 0;
7083 f.mode = smode;
7085 unsigned int mode_bytes = GET_MODE_SIZE (smode);
7086 for (unsigned int byte = 0; byte < mode_bytes; ++byte)
7088 /* Always constant because the inputs are. */
7089 unsigned int lsb
7090 = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
7091 unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
7092 if (lsb >= HOST_BITS_PER_WIDE_INT)
7093 f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
7094 else
7095 f.data.low |= unit << lsb;
7097 return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
7100 return NULL_RTX;
7103 /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
7104 is to convert a runtime BYTE value into a constant one. */
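/* For example, for a V4SI constant that duplicates a single SImode value
   (one pattern, one element per pattern), a byte offset of 8 reduces to 0,
   since every 4-byte sequence repeats the same value. */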
7106 static poly_uint64
7107 simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
7109 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7110 machine_mode mode = GET_MODE (x);
7111 unsigned int elt_bits = vector_element_size (GET_MODE_BITSIZE (mode),
7112 GET_MODE_NUNITS (mode));
7113 /* The number of bits needed to encode one element from each pattern. */
7114 unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
7116 /* Identify the start point in terms of a sequence number and a byte offset
7117 within that sequence. */
7118 poly_uint64 first_sequence;
7119 unsigned HOST_WIDE_INT subbit;
7120 if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
7121 &first_sequence, &subbit))
7123 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7124 if (nelts_per_pattern == 1)
7125 /* This is a duplicated vector, so the value of FIRST_SEQUENCE
7126 doesn't matter. */
7127 byte = subbit / BITS_PER_UNIT;
7128 else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
7130 /* The subreg drops the first element from each pattern and
7131 only uses the second element. Find the first sequence
7132 that starts on a byte boundary. */
7133 subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
7134 byte = subbit / BITS_PER_UNIT;
7137 return byte;
7140 /* Subroutine of simplify_subreg in which:
7142 - X is known to be a CONST_VECTOR
7143 - OUTERMODE is known to be a vector mode
7145 Try to handle the subreg by operating on the CONST_VECTOR encoding
7146 rather than on each individual element of the CONST_VECTOR.
7148 Return the simplified subreg on success, otherwise return NULL_RTX. */
7150 static rtx
7151 simplify_const_vector_subreg (machine_mode outermode, rtx x,
7152 machine_mode innermode, unsigned int first_byte)
7154 /* Paradoxical subregs of vectors have dubious semantics. */
7155 if (paradoxical_subreg_p (outermode, innermode))
7156 return NULL_RTX;
7158 /* We can only preserve the semantics of a stepped pattern if the new
7159 vector element is the same as the original one. */
7160 if (CONST_VECTOR_STEPPED_P (x)
7161 && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
7162 return NULL_RTX;
7164 /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
7165 unsigned int x_elt_bits
7166 = vector_element_size (GET_MODE_BITSIZE (innermode),
7167 GET_MODE_NUNITS (innermode));
7168 unsigned int out_elt_bits
7169 = vector_element_size (GET_MODE_BITSIZE (outermode),
7170 GET_MODE_NUNITS (outermode));
7172 /* The number of bits needed to encode one element from every pattern
7173 of the original vector. */
7174 unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
7176 /* The number of bits needed to encode one element from every pattern
7177 of the result. */
7178 unsigned int out_sequence_bits
7179 = least_common_multiple (x_sequence_bits, out_elt_bits);
7181 /* Work out the number of interleaved patterns in the output vector
7182 and the number of encoded elements per pattern. */
7183 unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
7184 unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
7186 /* The encoding scheme requires the number of elements to be a multiple
7187 of the number of patterns, so that each pattern appears at least once
7188 and so that the same number of elements appear from each pattern. */
7189 bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
7190 unsigned int const_nunits;
7191 if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
7192 && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
7194 /* Either the encoding is invalid, or applying it would give us
7195 more elements than we need. Just encode each element directly. */
7196 out_npatterns = const_nunits;
7197 nelts_per_pattern = 1;
7199 else if (!ok_p)
7200 return NULL_RTX;
7202 /* Get enough bytes of X to form the new encoding. */
7203 unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
7204 unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
7205 auto_vec<target_unit, 128> buffer (buffer_bytes);
7206 if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
7207 return NULL_RTX;
7209 /* Reencode the bytes as OUTERMODE. */
7210 return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
7211 nelts_per_pattern);
7214 /* Try to simplify a subreg of a constant by encoding the subreg region
7215 as a sequence of target bytes and reading them back in the new mode.
7216 Return the new value on success, otherwise return null.
7218 The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
7219 and byte offset FIRST_BYTE. */
7221 static rtx
7222 simplify_immed_subreg (fixed_size_mode outermode, rtx x,
7223 machine_mode innermode, unsigned int first_byte)
7225 unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
7226 auto_vec<target_unit, 128> buffer (buffer_bytes);
7228 /* Some ports misuse CCmode. */
7229 if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
7230 return x;
7232 /* Paradoxical subregs read undefined values for bytes outside of the
7233 inner value. However, we have traditionally always sign-extended
7234 integer constants and zero-extended others. */
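/* For example, taking a paradoxical HImode subreg of (const_int -1) in
   QImode fills the extra byte with 0xff and yields (const_int -1) in
   HImode rather than 0x00ff. */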
7235 unsigned int inner_bytes = buffer_bytes;
7236 if (paradoxical_subreg_p (outermode, innermode))
7238 if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
7239 return NULL_RTX;
7241 target_unit filler = 0;
7242 if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
7243 filler = -1;
7245 /* Add any leading bytes due to big-endian layout. The number of
7246 bytes must be constant because both modes have constant size. */
7247 unsigned int leading_bytes
7248 = -byte_lowpart_offset (outermode, innermode).to_constant ();
7249 for (unsigned int i = 0; i < leading_bytes; ++i)
7250 buffer.quick_push (filler);
7252 if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7253 return NULL_RTX;
7255 /* Add any trailing bytes due to little-endian layout. */
7256 while (buffer.length () < buffer_bytes)
7257 buffer.quick_push (filler);
7259 else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
7260 return NULL_RTX;
7261 rtx ret = native_decode_rtx (outermode, buffer, 0);
7262 if (ret && MODE_COMPOSITE_P (outermode))
7264 auto_vec<target_unit, 128> buffer2 (buffer_bytes);
7265 if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
7266 return NULL_RTX;
7267 for (unsigned int i = 0; i < buffer_bytes; ++i)
7268 if (buffer[i] != buffer2[i])
7269 return NULL_RTX;
7271 return ret;
7274 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
7275 Return 0 if no simplifications are possible. */
7276 rtx
7277 simplify_context::simplify_subreg (machine_mode outermode, rtx op,
7278 machine_mode innermode, poly_uint64 byte)
7280 /* Little bit of sanity checking. */
7281 gcc_assert (innermode != VOIDmode);
7282 gcc_assert (outermode != VOIDmode);
7283 gcc_assert (innermode != BLKmode);
7284 gcc_assert (outermode != BLKmode);
7286 gcc_assert (GET_MODE (op) == innermode
7287 || GET_MODE (op) == VOIDmode);
7289 poly_uint64 outersize = GET_MODE_SIZE (outermode);
7290 if (!multiple_p (byte, outersize))
7291 return NULL_RTX;
7293 poly_uint64 innersize = GET_MODE_SIZE (innermode);
7294 if (maybe_ge (byte, innersize))
7295 return NULL_RTX;
7297 if (outermode == innermode && known_eq (byte, 0U))
7298 return op;
7300 if (GET_CODE (op) == CONST_VECTOR)
7301 byte = simplify_const_vector_byte_offset (op, byte);
7303 if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
7305 rtx elt;
7307 if (VECTOR_MODE_P (outermode)
7308 && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
7309 && vec_duplicate_p (op, &elt))
7310 return gen_vec_duplicate (outermode, elt);
7312 if (outermode == GET_MODE_INNER (innermode)
7313 && vec_duplicate_p (op, &elt))
7314 return elt;
7317 if (CONST_SCALAR_INT_P (op)
7318 || CONST_DOUBLE_AS_FLOAT_P (op)
7319 || CONST_FIXED_P (op)
7320 || GET_CODE (op) == CONST_VECTOR)
7322 unsigned HOST_WIDE_INT cbyte;
7323 if (byte.is_constant (&cbyte))
7325 if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
7327 rtx tmp = simplify_const_vector_subreg (outermode, op,
7328 innermode, cbyte);
7329 if (tmp)
7330 return tmp;
7333 fixed_size_mode fs_outermode;
7334 if (is_a <fixed_size_mode> (outermode, &fs_outermode))
7335 return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
7339 /* Changing mode twice with SUBREG => just change it once,
7340 or not at all if changing back to OP's starting mode. */
7341 if (GET_CODE (op) == SUBREG)
7343 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
7344 poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
7345 rtx newx;
7347 if (outermode == innermostmode
7348 && known_eq (byte, 0U)
7349 && known_eq (SUBREG_BYTE (op), 0))
7350 return SUBREG_REG (op);
7352 /* Work out the memory offset of the final OUTERMODE value relative
7353 to the inner value of OP. */
7354 poly_int64 mem_offset = subreg_memory_offset (outermode,
7355 innermode, byte);
7356 poly_int64 op_mem_offset = subreg_memory_offset (op);
7357 poly_int64 final_offset = mem_offset + op_mem_offset;
7359 /* See whether resulting subreg will be paradoxical. */
7360 if (!paradoxical_subreg_p (outermode, innermostmode))
7362 /* Bail out in case resulting subreg would be incorrect. */
7363 if (maybe_lt (final_offset, 0)
7364 || maybe_ge (poly_uint64 (final_offset), innermostsize)
7365 || !multiple_p (final_offset, outersize))
7366 return NULL_RTX;
7368 else
7370 poly_int64 required_offset = subreg_memory_offset (outermode,
7371 innermostmode, 0);
7372 if (maybe_ne (final_offset, required_offset))
7373 return NULL_RTX;
7374 /* Paradoxical subregs always have byte offset 0. */
7375 final_offset = 0;
7378 /* Recurse for further possible simplifications. */
7379 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
7380 final_offset);
7381 if (newx)
7382 return newx;
7383 if (validate_subreg (outermode, innermostmode,
7384 SUBREG_REG (op), final_offset))
7386 newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
7387 if (SUBREG_PROMOTED_VAR_P (op)
7388 && SUBREG_PROMOTED_SIGN (op) >= 0
7389 && GET_MODE_CLASS (outermode) == MODE_INT
7390 && known_ge (outersize, innersize)
7391 && known_le (outersize, innermostsize)
7392 && subreg_lowpart_p (newx))
7394 SUBREG_PROMOTED_VAR_P (newx) = 1;
7395 SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
7397 return newx;
7399 return NULL_RTX;
7402 /* SUBREG of a hard register => just change the register number
7403 and/or mode. If the hard register is not valid in that mode,
7404 suppress this simplification. If the hard register is the stack,
7405 frame, or argument pointer, leave this as a SUBREG. */
7407 if (REG_P (op) && HARD_REGISTER_P (op))
7409 unsigned int regno, final_regno;
7411 regno = REGNO (op);
7412 final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
7413 if (HARD_REGISTER_NUM_P (final_regno))
7415 rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
7416 subreg_memory_offset (outermode,
7417 innermode, byte));
7419 /* Propagate original regno. We don't have any way to specify
7420 the offset inside original regno, so do so only for lowpart.
7421 The information is used only by alias analysis that cannot
7422 grok partial registers anyway. */
7424 if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
7425 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
7426 return x;
7430 /* If we have a SUBREG of a register that we are replacing and we are
7431 replacing it with a MEM, make a new MEM and try replacing the
7432 SUBREG with it. Don't do this if the MEM has a mode-dependent address
7433 or if we would be widening it. */
7435 if (MEM_P (op)
7436 && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
7437 /* Allow splitting of volatile memory references in case we don't
7438 have an instruction to move the whole thing. */
7439 && (! MEM_VOLATILE_P (op)
7440 || ! have_insn_for (SET, innermode))
7441 && !(STRICT_ALIGNMENT && MEM_ALIGN (op) < GET_MODE_ALIGNMENT (outermode))
7442 && known_le (outersize, innersize))
7443 return adjust_address_nv (op, outermode, byte);
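
/* Illustrative note (editorial, not in the original source; the RTL shown
   is a sketch with placeholder names): assuming OP is
   (mem:DI (reg base)) and the target constraints checked above are met,
   a call such as

     simplify_subreg (SImode, op, DImode, 4)

   is handled here and yields something like
   (mem:SI (plus (reg base) (const_int 4))), i.e. the four bytes at
   offset 4 re-expressed as a narrower memory reference via
   adjust_address_nv.  */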
7445 /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
7446 of two parts. */
7447 if (GET_CODE (op) == CONCAT
7448 || GET_CODE (op) == VEC_CONCAT)
7450 poly_uint64 final_offset;
7451 rtx part, res;
7453 machine_mode part_mode = GET_MODE (XEXP (op, 0));
7454 if (part_mode == VOIDmode)
7455 part_mode = GET_MODE_INNER (GET_MODE (op));
7456 poly_uint64 part_size = GET_MODE_SIZE (part_mode);
7457 if (known_lt (byte, part_size))
7459 part = XEXP (op, 0);
7460 final_offset = byte;
7462 else if (known_ge (byte, part_size))
7464 part = XEXP (op, 1);
7465 final_offset = byte - part_size;
7467 else
7468 return NULL_RTX;
7470 if (maybe_gt (final_offset + outersize, part_size))
7471 return NULL_RTX;
7473 part_mode = GET_MODE (part);
7474 if (part_mode == VOIDmode)
7475 part_mode = GET_MODE_INNER (GET_MODE (op));
7476 res = simplify_subreg (outermode, part, part_mode, final_offset);
7477 if (res)
7478 return res;
7479 if (validate_subreg (outermode, part_mode, part, final_offset))
7480 return gen_rtx_SUBREG (outermode, part, final_offset);
7481 return NULL_RTX;
7484 /* Simplify
7485 (subreg (vec_merge (X)
7486 (vector)
7487 (const_int ((1 << N) | M)))
7488 (N * sizeof (outermode)))
7490 to (subreg (X) (N * sizeof (outermode)))
7492 unsigned int idx;
7493 if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
7494 && idx < HOST_BITS_PER_WIDE_INT
7495 && GET_CODE (op) == VEC_MERGE
7496 && GET_MODE_INNER (innermode) == outermode
7497 && CONST_INT_P (XEXP (op, 2))
7498 && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
7499 return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
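
/* Worked example (editorial, not in the original source; X and Y are
   placeholder operands): with OUTERMODE == SImode,
   OP == (vec_merge:V4SI X Y (const_int 5)) and BYTE == 8, we get
   IDX == 2 and mask bit 2 is set, so element 2 of the merge is known to
   come from X and the subreg reduces to (subreg:SI X 8), or to a
   further simplification of that expression.  */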
7501 /* A SUBREG resulting from a zero extension may fold to zero if
7502 it extracts higher bits than the ZERO_EXTEND's source provides. */
7503 if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
7505 poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
7506 if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
7507 return CONST0_RTX (outermode);
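
/* Worked example (editorial, not in the original source; R is a
   placeholder register): on a little-endian target,
   (subreg:SI (zero_extend:DI (reg:HI R)) 4) selects bits 32..63 of the
   extended value.  The HImode source only supplies bits 0..15, so every
   selected bit is a zero-extension bit and the expression folds to
   (const_int 0).  */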
7510 scalar_int_mode int_outermode, int_innermode;
7511 if (is_a <scalar_int_mode> (outermode, &int_outermode)
7512 && is_a <scalar_int_mode> (innermode, &int_innermode)
7513 && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
7515 /* Handle polynomial integers. The upper bits of a paradoxical
7516 subreg are undefined, so this is safe regardless of whether
7517 we're truncating or extending. */
7518 if (CONST_POLY_INT_P (op))
7520 poly_wide_int val
7521 = poly_wide_int::from (const_poly_int_value (op),
7522 GET_MODE_PRECISION (int_outermode),
7523 SIGNED);
7524 return immed_wide_int_const (val, int_outermode);
7527 if (GET_MODE_PRECISION (int_outermode)
7528 < GET_MODE_PRECISION (int_innermode))
7530 rtx tem = simplify_truncation (int_outermode, op, int_innermode);
7531 if (tem)
7532 return tem;
7536 /* If OP is a vector comparison and the subreg is not changing the
7537 number of elements or the size of the elements, change the result
7538 of the comparison to the new mode. */
7539 if (COMPARISON_P (op)
7540 && VECTOR_MODE_P (outermode)
7541 && VECTOR_MODE_P (innermode)
7542 && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
7543 && known_eq (GET_MODE_UNIT_SIZE (outermode),
7544 GET_MODE_UNIT_SIZE (innermode)))
7545 return simplify_gen_relational (GET_CODE (op), outermode, innermode,
7546 XEXP (op, 0), XEXP (op, 1));
7547 return NULL_RTX;
7550 /* Make a SUBREG operation or equivalent if it folds. */
7553 simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
7554 machine_mode innermode,
7555 poly_uint64 byte)
7557 rtx newx;
7559 newx = simplify_subreg (outermode, op, innermode, byte);
7560 if (newx)
7561 return newx;
7563 if (GET_CODE (op) == SUBREG
7564 || GET_CODE (op) == CONCAT
7565 || GET_MODE (op) == VOIDmode)
7566 return NULL_RTX;
7568 if (MODE_COMPOSITE_P (outermode)
7569 && (CONST_SCALAR_INT_P (op)
7570 || CONST_DOUBLE_AS_FLOAT_P (op)
7571 || CONST_FIXED_P (op)
7572 || GET_CODE (op) == CONST_VECTOR))
7573 return NULL_RTX;
7575 if (validate_subreg (outermode, innermode, op, byte))
7576 return gen_rtx_SUBREG (outermode, op, byte);
7578 return NULL_RTX;
7581 /* Generate a subreg that extracts the least significant part of EXPR
7582 (which has mode INNER_MODE) in mode OUTER_MODE. */
7585 simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
7586 machine_mode inner_mode)
7588 return simplify_gen_subreg (outer_mode, expr, inner_mode,
7589 subreg_lowpart_offset (outer_mode, inner_mode));
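
/* Usage sketch (editorial, not in the original source; X is a
   placeholder value): a caller that wants the least significant QImode
   piece of an SImode value can write

     rtx low = lowpart_subreg (QImode, x, SImode);
     if (low == NULL_RTX)
       ... fall back, e.g. by copying X to a fresh pseudo first ...

   The byte offset comes from subreg_lowpart_offset, so the same call is
   correct for either endianness (offset 0 on little-endian targets,
   typically 3 on big-endian ones).  */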
7592 /* Simplify X, an rtx expression.
7594 Return the simplified expression or NULL if no simplifications
7595 were possible.
7597 This is the preferred entry point into the simplification routines;
7598 however, we still allow passes to call the more specific routines.
7600 Right now GCC has three (yes, three) major bodies of RTL simplification
7601 code that need to be unified.
7603 1. fold_rtx in cse.c. This code uses various CSE specific
7604 information to aid in RTL simplification.
7606 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
7607 it uses combine specific information to aid in RTL
7608 simplification.
7610 3. The routines in this file.
7613 Long term we want to only have one body of simplification code; to
7614 get to that state I recommend the following steps:
7616 1. Pore over fold_rtx & simplify_rtx and move any simplifications
7617 which do not depend on pass-specific state into these routines.
7619 2. As code is moved by #1, change fold_rtx & simplify_rtx to
7620 use this routine whenever possible.
7622 3. Allow for pass-dependent state to be provided to these
7623 routines and add simplifications based on the pass-dependent
7624 state. Remove code from cse.c & combine.c that becomes
7625 redundant/dead.
7627 It will take time, but ultimately the compiler will be easier to
7628 maintain and improve. It's totally silly that when we add a
7629 simplification it needs to be added to 4 places (3 for RTL
7630 simplification and 1 for tree simplification). */
7633 simplify_rtx (const_rtx x)
7635 const enum rtx_code code = GET_CODE (x);
7636 const machine_mode mode = GET_MODE (x);
7638 switch (GET_RTX_CLASS (code))
7640 case RTX_UNARY:
7641 return simplify_unary_operation (code, mode,
7642 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
7643 case RTX_COMM_ARITH:
7644 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7645 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
7647 /* Fall through. */
7649 case RTX_BIN_ARITH:
7650 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
7652 case RTX_TERNARY:
7653 case RTX_BITFIELD_OPS:
7654 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
7655 XEXP (x, 0), XEXP (x, 1),
7656 XEXP (x, 2));
7658 case RTX_COMPARE:
7659 case RTX_COMM_COMPARE:
7660 return simplify_relational_operation (code, mode,
7661 ((GET_MODE (XEXP (x, 0))
7662 != VOIDmode)
7663 ? GET_MODE (XEXP (x, 0))
7664 : GET_MODE (XEXP (x, 1))),
7665 XEXP (x, 0),
7666 XEXP (x, 1));
7668 case RTX_EXTRA:
7669 if (code == SUBREG)
7670 return simplify_subreg (mode, SUBREG_REG (x),
7671 GET_MODE (SUBREG_REG (x)),
7672 SUBREG_BYTE (x));
7673 break;
7675 case RTX_OBJ:
7676 if (code == LO_SUM)
7678 /* Convert (lo_sum (high FOO) FOO) to FOO. */
7679 if (GET_CODE (XEXP (x, 0)) == HIGH
7680 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
7681 return XEXP (x, 1);
7683 break;
7685 default:
7686 break;
7688 return NULL;
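
/* Usage sketch (editorial, not in the original source): callers treat a
   NULL result as "no simplification found" rather than as an error,
   e.g.

     if (rtx tem = simplify_rtx (x))
       x = tem;

   so the routine never needs to return X itself when nothing changed.  */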
7691 #if CHECKING_P
7693 namespace selftest {
7695 /* Make a unique pseudo REG of mode MODE for use by selftests. */
7697 static rtx
7698 make_test_reg (machine_mode mode)
7700 static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
7702 return gen_rtx_REG (mode, test_reg_num++);
7705 static void
7706 test_scalar_int_ops (machine_mode mode)
7708 rtx op0 = make_test_reg (mode);
7709 rtx op1 = make_test_reg (mode);
7710 rtx six = GEN_INT (6);
7712 rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
7713 rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
7714 rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);
7716 rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
7717 rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
7718 rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);
7720 rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
7721 rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);
7723 /* Test some binary identities. */
7724 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
7725 ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
7726 ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
7727 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
7728 ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
7729 ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
7730 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
7731 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
7732 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
7733 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
7734 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
7735 ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
7736 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
7737 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
7738 ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
7739 ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
7740 ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));
7742 /* Test some self-inverse operations. */
7743 ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
7744 ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
7745 ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));
7747 /* Test some reflexive operations. */
7748 ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
7749 ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
7750 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
7751 ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
7752 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
7753 ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));
7755 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
7756 ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));
7758 /* Test simplify_distributive_operation. */
7759 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
7760 simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
7761 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
7762 simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
7763 ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
7764 simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
7766 /* Test useless extensions are eliminated. */
7767 ASSERT_RTX_EQ (op0, simplify_gen_unary (TRUNCATE, mode, op0, mode));
7768 ASSERT_RTX_EQ (op0, simplify_gen_unary (ZERO_EXTEND, mode, op0, mode));
7769 ASSERT_RTX_EQ (op0, simplify_gen_unary (SIGN_EXTEND, mode, op0, mode));
7770 ASSERT_RTX_EQ (op0, lowpart_subreg (mode, op0, mode));
7773 /* Verify some simplifications of integer extension/truncation.
7774 Machine mode BMODE is guaranteed to be wider than SMODE. */
7776 static void
7777 test_scalar_int_ext_ops (machine_mode bmode, machine_mode smode)
7779 rtx sreg = make_test_reg (smode);
7781 /* Check truncation of extension. */
7782 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7783 simplify_gen_unary (ZERO_EXTEND, bmode,
7784 sreg, smode),
7785 bmode),
7786 sreg);
7787 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7788 simplify_gen_unary (SIGN_EXTEND, bmode,
7789 sreg, smode),
7790 bmode),
7791 sreg);
7792 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7793 lowpart_subreg (bmode, sreg, smode),
7794 bmode),
7795 sreg);
7798 /* Verify more simplifications of integer extension/truncation.
7799 BMODE is wider than MMODE which is wider than SMODE. */
7801 static void
7802 test_scalar_int_ext_ops2 (machine_mode bmode, machine_mode mmode,
7803 machine_mode smode)
7805 rtx breg = make_test_reg (bmode);
7806 rtx mreg = make_test_reg (mmode);
7807 rtx sreg = make_test_reg (smode);
7809 /* Check truncate of truncate. */
7810 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7811 simplify_gen_unary (TRUNCATE, mmode,
7812 breg, bmode),
7813 mmode),
7814 simplify_gen_unary (TRUNCATE, smode, breg, bmode));
7816 /* Check extension of extension. */
7817 ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND, bmode,
7818 simplify_gen_unary (ZERO_EXTEND, mmode,
7819 sreg, smode),
7820 mmode),
7821 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7822 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7823 simplify_gen_unary (SIGN_EXTEND, mmode,
7824 sreg, smode),
7825 mmode),
7826 simplify_gen_unary (SIGN_EXTEND, bmode, sreg, smode));
7827 ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
7828 simplify_gen_unary (ZERO_EXTEND, mmode,
7829 sreg, smode),
7830 mmode),
7831 simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
7833 /* Check truncation of extension. */
7834 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7835 simplify_gen_unary (ZERO_EXTEND, bmode,
7836 mreg, mmode),
7837 bmode),
7838 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7839 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7840 simplify_gen_unary (SIGN_EXTEND, bmode,
7841 mreg, mmode),
7842 bmode),
7843 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7844 ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
7845 lowpart_subreg (bmode, mreg, mmode),
7846 bmode),
7847 simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
7851 /* Verify some simplifications involving scalar expressions. */
7853 static void
7854 test_scalar_ops ()
7856 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
7858 machine_mode mode = (machine_mode) i;
7859 if (SCALAR_INT_MODE_P (mode) && mode != BImode)
7860 test_scalar_int_ops (mode);
7863 test_scalar_int_ext_ops (HImode, QImode);
7864 test_scalar_int_ext_ops (SImode, QImode);
7865 test_scalar_int_ext_ops (SImode, HImode);
7866 test_scalar_int_ext_ops (DImode, QImode);
7867 test_scalar_int_ext_ops (DImode, HImode);
7868 test_scalar_int_ext_ops (DImode, SImode);
7870 test_scalar_int_ext_ops2 (SImode, HImode, QImode);
7871 test_scalar_int_ext_ops2 (DImode, HImode, QImode);
7872 test_scalar_int_ext_ops2 (DImode, SImode, QImode);
7873 test_scalar_int_ext_ops2 (DImode, SImode, HImode);
7876 /* Test vector simplifications involving VEC_DUPLICATE in which the
7877 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7878 register that holds one element of MODE. */
7880 static void
7881 test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
7883 scalar_mode inner_mode = GET_MODE_INNER (mode);
7884 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7885 poly_uint64 nunits = GET_MODE_NUNITS (mode);
7886 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7888 /* Test some simple unary cases with VEC_DUPLICATE arguments. */
7889 rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
7890 rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
7891 ASSERT_RTX_EQ (duplicate,
7892 simplify_unary_operation (NOT, mode,
7893 duplicate_not, mode));
7895 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7896 rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
7897 ASSERT_RTX_EQ (duplicate,
7898 simplify_unary_operation (NEG, mode,
7899 duplicate_neg, mode));
7901 /* Test some simple binary cases with VEC_DUPLICATE arguments. */
7902 ASSERT_RTX_EQ (duplicate,
7903 simplify_binary_operation (PLUS, mode, duplicate,
7904 CONST0_RTX (mode)));
7906 ASSERT_RTX_EQ (duplicate,
7907 simplify_binary_operation (MINUS, mode, duplicate,
7908 CONST0_RTX (mode)));
7910 ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
7911 simplify_binary_operation (MINUS, mode, duplicate,
7912 duplicate));
7915 /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
7916 rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
7917 ASSERT_RTX_PTR_EQ (scalar_reg,
7918 simplify_binary_operation (VEC_SELECT, inner_mode,
7919 duplicate, zero_par));
7921 unsigned HOST_WIDE_INT const_nunits;
7922 if (nunits.is_constant (&const_nunits))
7924 /* And again with the final element. */
7925 rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
7926 rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
7927 ASSERT_RTX_PTR_EQ (scalar_reg,
7928 simplify_binary_operation (VEC_SELECT, inner_mode,
7929 duplicate, last_par));
7931 /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
7932 rtx vector_reg = make_test_reg (mode);
7933 for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
7935 if (i >= HOST_BITS_PER_WIDE_INT)
7936 break;
7937 rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
7938 rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
7939 poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
7940 ASSERT_RTX_EQ (scalar_reg,
7941 simplify_gen_subreg (inner_mode, vm,
7942 mode, offset));
7946 /* Test a scalar subreg of a VEC_DUPLICATE. */
7947 poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
7948 ASSERT_RTX_EQ (scalar_reg,
7949 simplify_gen_subreg (inner_mode, duplicate,
7950 mode, offset));
7952 machine_mode narrower_mode;
7953 if (maybe_ne (nunits, 2U)
7954 && multiple_p (nunits, 2)
7955 && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
7956 && VECTOR_MODE_P (narrower_mode))
7958 /* Test VEC_DUPLICATE of a vector. */
7959 rtx_vector_builder nbuilder (narrower_mode, 2, 1);
7960 nbuilder.quick_push (const0_rtx);
7961 nbuilder.quick_push (const1_rtx);
7962 rtx_vector_builder builder (mode, 2, 1);
7963 builder.quick_push (const0_rtx);
7964 builder.quick_push (const1_rtx);
7965 ASSERT_RTX_EQ (builder.build (),
7966 simplify_unary_operation (VEC_DUPLICATE, mode,
7967 nbuilder.build (),
7968 narrower_mode));
7970 /* Test VEC_SELECT of a vector. */
7971 rtx vec_par
7972 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
7973 rtx narrower_duplicate
7974 = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
7975 ASSERT_RTX_EQ (narrower_duplicate,
7976 simplify_binary_operation (VEC_SELECT, narrower_mode,
7977 duplicate, vec_par));
7979 /* Test a vector subreg of a VEC_DUPLICATE. */
7980 poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
7981 ASSERT_RTX_EQ (narrower_duplicate,
7982 simplify_gen_subreg (narrower_mode, duplicate,
7983 mode, offset));
7987 /* Test vector simplifications involving VEC_SERIES in which the
7988 operands and result have vector mode MODE. SCALAR_REG is a pseudo
7989 register that holds one element of MODE. */
7991 static void
7992 test_vector_ops_series (machine_mode mode, rtx scalar_reg)
7994 /* Test unary cases with VEC_SERIES arguments. */
7995 scalar_mode inner_mode = GET_MODE_INNER (mode);
7996 rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
7997 rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
7998 rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
7999 rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
8000 rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
8001 rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
8002 rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
8003 rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
8004 neg_scalar_reg);
8005 ASSERT_RTX_EQ (series_0_r,
8006 simplify_unary_operation (NEG, mode, series_0_nr, mode));
8007 ASSERT_RTX_EQ (series_r_m1,
8008 simplify_unary_operation (NEG, mode, series_nr_1, mode));
8009 ASSERT_RTX_EQ (series_r_r,
8010 simplify_unary_operation (NEG, mode, series_nr_nr, mode));
8012 /* Test that a VEC_SERIES with a zero step is simplified away. */
8013 ASSERT_RTX_EQ (duplicate,
8014 simplify_binary_operation (VEC_SERIES, mode,
8015 scalar_reg, const0_rtx));
8017 /* Test PLUS and MINUS with VEC_SERIES. */
8018 rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
8019 rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
8020 rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
8021 ASSERT_RTX_EQ (series_r_r,
8022 simplify_binary_operation (PLUS, mode, series_0_r,
8023 duplicate));
8024 ASSERT_RTX_EQ (series_r_1,
8025 simplify_binary_operation (PLUS, mode, duplicate,
8026 series_0_1));
8027 ASSERT_RTX_EQ (series_r_m1,
8028 simplify_binary_operation (PLUS, mode, duplicate,
8029 series_0_m1));
8030 ASSERT_RTX_EQ (series_0_r,
8031 simplify_binary_operation (MINUS, mode, series_r_r,
8032 duplicate));
8033 ASSERT_RTX_EQ (series_r_m1,
8034 simplify_binary_operation (MINUS, mode, duplicate,
8035 series_0_1));
8036 ASSERT_RTX_EQ (series_r_1,
8037 simplify_binary_operation (MINUS, mode, duplicate,
8038 series_0_m1));
8039 ASSERT_RTX_EQ (series_0_m1,
8040 simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
8041 constm1_rtx));
8043 /* Test NEG on constant vector series. */
8044 ASSERT_RTX_EQ (series_0_m1,
8045 simplify_unary_operation (NEG, mode, series_0_1, mode));
8046 ASSERT_RTX_EQ (series_0_1,
8047 simplify_unary_operation (NEG, mode, series_0_m1, mode));
8049 /* Test PLUS and MINUS on constant vector series. */
8050 rtx scalar2 = gen_int_mode (2, inner_mode);
8051 rtx scalar3 = gen_int_mode (3, inner_mode);
8052 rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
8053 rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
8054 rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
8055 ASSERT_RTX_EQ (series_1_1,
8056 simplify_binary_operation (PLUS, mode, series_0_1,
8057 CONST1_RTX (mode)));
8058 ASSERT_RTX_EQ (series_0_m1,
8059 simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
8060 series_0_m1));
8061 ASSERT_RTX_EQ (series_1_3,
8062 simplify_binary_operation (PLUS, mode, series_1_1,
8063 series_0_2));
8064 ASSERT_RTX_EQ (series_0_1,
8065 simplify_binary_operation (MINUS, mode, series_1_1,
8066 CONST1_RTX (mode)));
8067 ASSERT_RTX_EQ (series_1_1,
8068 simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
8069 series_0_m1));
8070 ASSERT_RTX_EQ (series_1_1,
8071 simplify_binary_operation (MINUS, mode, series_1_3,
8072 series_0_2));
8074 /* Test MULT between constant vectors. */
8075 rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
8076 rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
8077 rtx scalar9 = gen_int_mode (9, inner_mode);
8078 rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
8079 ASSERT_RTX_EQ (series_0_2,
8080 simplify_binary_operation (MULT, mode, series_0_1, vec2));
8081 ASSERT_RTX_EQ (series_3_9,
8082 simplify_binary_operation (MULT, mode, vec3, series_1_3));
8083 if (!GET_MODE_NUNITS (mode).is_constant ())
8084 ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
8085 series_0_1));
8087 /* Test ASHIFT between constant vectors. */
8088 ASSERT_RTX_EQ (series_0_2,
8089 simplify_binary_operation (ASHIFT, mode, series_0_1,
8090 CONST1_RTX (mode)));
8091 if (!GET_MODE_NUNITS (mode).is_constant ())
8092 ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
8093 series_0_1));
8096 static rtx
8097 simplify_merge_mask (rtx x, rtx mask, int op)
8099 return simplify_context ().simplify_merge_mask (x, mask, op);
8102 /* Verify simplify_merge_mask works correctly. */
8104 static void
8105 test_vec_merge (machine_mode mode)
8107 rtx op0 = make_test_reg (mode);
8108 rtx op1 = make_test_reg (mode);
8109 rtx op2 = make_test_reg (mode);
8110 rtx op3 = make_test_reg (mode);
8111 rtx op4 = make_test_reg (mode);
8112 rtx op5 = make_test_reg (mode);
8113 rtx mask1 = make_test_reg (SImode);
8114 rtx mask2 = make_test_reg (SImode);
8115 rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
8116 rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
8117 rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
8119 /* Simple vec_merge. */
8120 ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
8121 ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
8122 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
8123 ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
8125 /* Nested vec_merge.
8126 It's tempting to make this simplify right down to opN, but we don't
8127 because all the simplify_* functions assume that the operands have
8128 already been simplified. */
8129 rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
8130 ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
8131 ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
8133 /* Intermediate unary op. */
8134 rtx unop = gen_rtx_NOT (mode, vm1);
8135 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
8136 simplify_merge_mask (unop, mask1, 0));
8137 ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
8138 simplify_merge_mask (unop, mask1, 1));
8140 /* Intermediate binary op. */
8141 rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
8142 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
8143 simplify_merge_mask (binop, mask1, 0));
8144 ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
8145 simplify_merge_mask (binop, mask1, 1));
8147 /* Intermediate ternary op. */
8148 rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
8149 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
8150 simplify_merge_mask (tenop, mask1, 0));
8151 ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
8152 simplify_merge_mask (tenop, mask1, 1));
8154 /* Side effects. */
8155 rtx badop0 = gen_rtx_PRE_INC (mode, op0);
8156 rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
8157 ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
8158 ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
8160 /* Called indirectly. */
8161 ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
8162 simplify_rtx (nvm));
8165 /* Test subregs of integer vector constant X, trying elements in
8166 the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
8167 where NELTS is the number of elements in X. Subregs involving
8168 elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
8170 static void
8171 test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
8172 unsigned int first_valid = 0)
8174 machine_mode inner_mode = GET_MODE (x);
8175 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8177 for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
8179 machine_mode outer_mode = (machine_mode) modei;
8180 if (!VECTOR_MODE_P (outer_mode))
8181 continue;
8183 unsigned int outer_nunits;
8184 if (GET_MODE_INNER (outer_mode) == int_mode
8185 && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
8186 && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
8188 /* Test subregs in which the outer mode is a smaller,
8189 constant-sized vector of the same element type. */
8190 unsigned int limit
8191 = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
8192 for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
8194 rtx expected = NULL_RTX;
8195 if (elt >= first_valid)
8197 rtx_vector_builder builder (outer_mode, outer_nunits, 1);
8198 for (unsigned int i = 0; i < outer_nunits; ++i)
8199 builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
8200 expected = builder.build ();
8202 poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
8203 ASSERT_RTX_EQ (expected,
8204 simplify_subreg (outer_mode, x,
8205 inner_mode, byte));
8208 else if (known_eq (GET_MODE_SIZE (outer_mode),
8209 GET_MODE_SIZE (inner_mode))
8210 && known_eq (elt_bias, 0U)
8211 && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
8212 || known_eq (GET_MODE_BITSIZE (outer_mode),
8213 GET_MODE_NUNITS (outer_mode)))
8214 && (!FLOAT_MODE_P (outer_mode)
8215 || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
8216 == GET_MODE_UNIT_PRECISION (outer_mode)))
8217 && (GET_MODE_SIZE (inner_mode).is_constant ()
8218 || !CONST_VECTOR_STEPPED_P (x)))
8220 /* Try converting to OUTER_MODE and back. */
8221 rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
8222 ASSERT_TRUE (outer_x != NULL_RTX);
8223 ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
8224 outer_mode, 0));
8228 if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
8230 /* Test each byte in the element range. */
8231 unsigned int limit
8232 = constant_lower_bound (GET_MODE_SIZE (inner_mode));
8233 for (unsigned int i = 0; i < limit; ++i)
8235 unsigned int elt = i / GET_MODE_SIZE (int_mode);
8236 rtx expected = NULL_RTX;
8237 if (elt >= first_valid)
8239 unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
8240 if (BYTES_BIG_ENDIAN)
8241 byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
8242 rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
8243 wide_int shifted_elt
8244 = wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
8245 expected = immed_wide_int_const (shifted_elt, QImode);
8247 poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
8248 ASSERT_RTX_EQ (expected,
8249 simplify_subreg (QImode, x, inner_mode, byte));
8254 /* Test constant subregs of integer vector mode INNER_MODE, using 1
8255 element per pattern. */
8257 static void
8258 test_vector_subregs_repeating (machine_mode inner_mode)
8260 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8261 unsigned int min_nunits = constant_lower_bound (nunits);
8262 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8263 unsigned int count = gcd (min_nunits, 8);
8265 rtx_vector_builder builder (inner_mode, count, 1);
8266 for (unsigned int i = 0; i < count; ++i)
8267 builder.quick_push (gen_int_mode (8 - i, int_mode));
8268 rtx x = builder.build ();
8270 test_vector_subregs_modes (x);
8271 if (!nunits.is_constant ())
8272 test_vector_subregs_modes (x, nunits - min_nunits);
8275 /* Test constant subregs of integer vector mode INNER_MODE, using 2
8276 elements per pattern. */
8278 static void
8279 test_vector_subregs_fore_back (machine_mode inner_mode)
8281 poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
8282 unsigned int min_nunits = constant_lower_bound (nunits);
8283 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8284 unsigned int count = gcd (min_nunits, 4);
8286 rtx_vector_builder builder (inner_mode, count, 2);
8287 for (unsigned int i = 0; i < count; ++i)
8288 builder.quick_push (gen_int_mode (i, int_mode));
8289 for (unsigned int i = 0; i < count; ++i)
8290 builder.quick_push (gen_int_mode (-(int) i, int_mode));
8291 rtx x = builder.build ();
8293 test_vector_subregs_modes (x);
8294 if (!nunits.is_constant ())
8295 test_vector_subregs_modes (x, nunits - min_nunits, count);
8298 /* Test constant subregs of integer vector mode INNER_MODE, using 3
8299 elements per pattern. */
8301 static void
8302 test_vector_subregs_stepped (machine_mode inner_mode)
8304 /* Build { 0, 1, 2, 3, ... }. */
8305 scalar_mode int_mode = GET_MODE_INNER (inner_mode);
8306 rtx_vector_builder builder (inner_mode, 1, 3);
8307 for (unsigned int i = 0; i < 3; ++i)
8308 builder.quick_push (gen_int_mode (i, int_mode));
8309 rtx x = builder.build ();
8311 test_vector_subregs_modes (x);
8314 /* Test constant subregs of integer vector mode INNER_MODE. */
8316 static void
8317 test_vector_subregs (machine_mode inner_mode)
8319 test_vector_subregs_repeating (inner_mode);
8320 test_vector_subregs_fore_back (inner_mode);
8321 test_vector_subregs_stepped (inner_mode);
8324 /* Verify some simplifications involving vectors. */
8326 static void
8327 test_vector_ops ()
8329 for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
8331 machine_mode mode = (machine_mode) i;
8332 if (VECTOR_MODE_P (mode))
8334 rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
8335 test_vector_ops_duplicate (mode, scalar_reg);
8336 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
8337 && maybe_gt (GET_MODE_NUNITS (mode), 2))
8339 test_vector_ops_series (mode, scalar_reg);
8340 test_vector_subregs (mode);
8342 test_vec_merge (mode);
8347 template<unsigned int N>
8348 struct simplify_const_poly_int_tests
8350 static void run ();
8353 template<>
8354 struct simplify_const_poly_int_tests<1>
8356 static void run () {}
8359 /* Test various CONST_POLY_INT properties. */
8361 template<unsigned int N>
8362 void
8363 simplify_const_poly_int_tests<N>::run ()
8365 rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
8366 rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
8367 rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
8368 rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
8369 rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
8370 rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
8371 rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
8372 rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
8373 rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
8374 rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
8375 rtx two = GEN_INT (2);
8376 rtx six = GEN_INT (6);
8377 poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
8379 /* These tests only try limited operation combinations. Fuller arithmetic
8380 testing is done directly on poly_ints. */
8381 ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
8382 ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
8383 ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
8384 ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
8385 ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
8386 ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
8387 ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
8388 ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
8389 ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
8390 ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
8391 ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
8394 /* Run all of the selftests within this file. */
8396 void
8397 simplify_rtx_c_tests ()
8399 test_scalar_ops ();
8400 test_vector_ops ();
8401 simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
8404 } // namespace selftest
8406 #endif /* CHECKING_P */